repo_name
string
path
string
copies
string
size
string
content
string
license
string
bigbiff/android_kernel_samsung_sm-p605
net/ipv4/xfrm4_mode_beet.c
9342
3780
/*
 * xfrm4_mode_beet.c - BEET mode encapsulation for IPv4.
 *
 * Copyright (c) 2006 Diego Beltrami <diego.beltrami@gmail.com>
 *                    Miika Komu <miika@iki.fi>
 *                    Herbert Xu <herbert@gondor.apana.org.au>
 *                    Abhinav Pathak <abhinav.pathak@hiit.fi>
 *                    Jeff Ahrenholz <ahrenholz@gmail.com>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/stringify.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>

/*
 * Rebuild the fixed part of the IPv4 header at the current network header
 * offset from the fields that the xfrm input/output path stashed in the
 * skb control block (XFRM_MODE_SKB_CB).
 */
static void xfrm4_beet_make_header(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->ihl = 5;
	iph->version = 4;

	iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;
	iph->tos = XFRM_MODE_SKB_CB(skb)->tos;

	iph->id = XFRM_MODE_SKB_CB(skb)->id;
	iph->frag_off = XFRM_MODE_SKB_CB(skb)->frag_off;
	iph->ttl = XFRM_MODE_SKB_CB(skb)->ttl;
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_beet_phdr *ph;
	struct iphdr *top_iph;
	int hdrlen, optlen;

	hdrlen = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	/* Inner header carried IP options: reserve room for the BEET
	 * pseudo header that will describe them. */
	if (unlikely(optlen))
		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

	/* Place the network header so the new outer header (plus optional
	 * pseudo header) fits in front of the payload. */
	skb_set_network_header(skb, -x->props.header_len - hdrlen +
			       (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);

	xfrm4_beet_make_header(skb);

	/* Strip the inner header; ph points at where the pseudo header
	 * (if any) now lives. */
	ph = (struct ip_beet_phdr *)
		__skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

	top_iph = ip_hdr(skb);

	if (unlikely(optlen)) {
		BUG_ON(optlen < 0);

		/* Describe the inner options in the pseudo header and pad
		 * them to a 4-byte boundary with IPOPT_NOP. */
		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->protocol;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->protocol = IPPROTO_BEETPH;
		top_iph->ihl = sizeof(struct iphdr) / 4;
	}

	/* Outer addresses come from the SA, not from the inner packet. */
	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;

	return 0;
}

/*
 * Remove the BEET encapsulation: consume the optional pseudo header,
 * then reconstruct the inner IPv4 header (addresses taken from the
 * SA selector) and recompute its checksum.
 */
static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		/* Validate the advertised option length before trusting it:
		 * non-negative, 4-byte aligned, and small enough to fit in
		 * an IPv4 header. */
		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	/* Make room for and rebuild the inner IPv4 header. */
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}

static struct xfrm_mode xfrm4_beet_mode = {
	.input2 = xfrm4_beet_input,
	.input = xfrm_prepare_input,
	.output2 = xfrm4_beet_output,
	.output = xfrm4_prepare_output,
	.owner = THIS_MODULE,
	.encap = XFRM_MODE_BEET,
	.flags = XFRM_MODE_FLAG_TUNNEL,
};

static int __init xfrm4_beet_init(void)
{
	return xfrm_register_mode(&xfrm4_beet_mode, AF_INET);
}

static void __exit xfrm4_beet_exit(void)
{
	int err;

	err = xfrm_unregister_mode(&xfrm4_beet_mode, AF_INET);
	BUG_ON(err);
}

module_init(xfrm4_beet_init);
module_exit(xfrm4_beet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_BEET);
gpl-2.0
upndwn4par/kernel_jflteatt_stockup
net/ipv4/xfrm4_mode_beet.c
9342
3780
/*
 * xfrm4_mode_beet.c - BEET mode encapsulation for IPv4.
 *
 * Copyright (c) 2006 Diego Beltrami <diego.beltrami@gmail.com>
 *                    Miika Komu <miika@iki.fi>
 *                    Herbert Xu <herbert@gondor.apana.org.au>
 *                    Abhinav Pathak <abhinav.pathak@hiit.fi>
 *                    Jeff Ahrenholz <ahrenholz@gmail.com>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/stringify.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>

/*
 * Rebuild the fixed part of the IPv4 header at the current network header
 * offset from the fields that the xfrm input/output path stashed in the
 * skb control block (XFRM_MODE_SKB_CB).
 */
static void xfrm4_beet_make_header(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->ihl = 5;
	iph->version = 4;

	iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;
	iph->tos = XFRM_MODE_SKB_CB(skb)->tos;

	iph->id = XFRM_MODE_SKB_CB(skb)->id;
	iph->frag_off = XFRM_MODE_SKB_CB(skb)->frag_off;
	iph->ttl = XFRM_MODE_SKB_CB(skb)->ttl;
}

/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_beet_phdr *ph;
	struct iphdr *top_iph;
	int hdrlen, optlen;

	hdrlen = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	/* Inner header carried IP options: reserve room for the BEET
	 * pseudo header that will describe them. */
	if (unlikely(optlen))
		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

	/* Place the network header so the new outer header (plus optional
	 * pseudo header) fits in front of the payload. */
	skb_set_network_header(skb, -x->props.header_len - hdrlen +
			       (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);

	xfrm4_beet_make_header(skb);

	/* Strip the inner header; ph points at where the pseudo header
	 * (if any) now lives. */
	ph = (struct ip_beet_phdr *)
		__skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

	top_iph = ip_hdr(skb);

	if (unlikely(optlen)) {
		BUG_ON(optlen < 0);

		/* Describe the inner options in the pseudo header and pad
		 * them to a 4-byte boundary with IPOPT_NOP. */
		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->protocol;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->protocol = IPPROTO_BEETPH;
		top_iph->ihl = sizeof(struct iphdr) / 4;
	}

	/* Outer addresses come from the SA, not from the inner packet. */
	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;

	return 0;
}

/*
 * Remove the BEET encapsulation: consume the optional pseudo header,
 * then reconstruct the inner IPv4 header (addresses taken from the
 * SA selector) and recompute its checksum.
 */
static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		/* Validate the advertised option length before trusting it:
		 * non-negative, 4-byte aligned, and small enough to fit in
		 * an IPv4 header. */
		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		__skb_pull(skb, phlen);
	}

	/* Make room for and rebuild the inner IPv4 header. */
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}

static struct xfrm_mode xfrm4_beet_mode = {
	.input2 = xfrm4_beet_input,
	.input = xfrm_prepare_input,
	.output2 = xfrm4_beet_output,
	.output = xfrm4_prepare_output,
	.owner = THIS_MODULE,
	.encap = XFRM_MODE_BEET,
	.flags = XFRM_MODE_FLAG_TUNNEL,
};

static int __init xfrm4_beet_init(void)
{
	return xfrm_register_mode(&xfrm4_beet_mode, AF_INET);
}

static void __exit xfrm4_beet_exit(void)
{
	int err;

	err = xfrm_unregister_mode(&xfrm4_beet_mode, AF_INET);
	BUG_ON(err);
}

module_init(xfrm4_beet_init);
module_exit(xfrm4_beet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_BEET);
gpl-2.0
foomango/linux-3.7.1
arch/mips/math-emu/sp_fint.c
10366
1865
/* IEEE754 floating point arithmetic
 * single precision
 */
/*
 * MIPS floating point support
 * Copyright (C) 1994-2000 Algorithmics Ltd.
 *
 * ########################################################################
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 */

#include "ieee754sp.h"

/*
 * Convert a signed 32-bit integer to IEEE754 single precision.
 *
 * Small common values (0, +/-1, +/-10) are returned from precomputed
 * constants; everything else is normalized into the guard/round/sticky
 * extended single-precision format and rounded via SPNORMRET1.
 */
ieee754sp ieee754sp_fint(int x)
{
	unsigned xm;		/* unsigned magnitude of x */
	int xe;			/* binary exponent */
	int xs;			/* sign flag */

	CLEARCX;

	if (x == 0)
		return ieee754sp_zero(0);
	if (x == 1 || x == -1)
		return ieee754sp_one(x < 0);
	if (x == 10 || x == -10)
		return ieee754sp_ten(x < 0);

	xs = (x < 0);
	if (xs) {
		/*
		 * Negate in unsigned arithmetic: this yields |x| (mod 2^32)
		 * for every value including INT_MIN.  The original code
		 * special-cased the most negative value by comparing against
		 * (1 << 31), which left-shifts into the sign bit and is
		 * undefined behaviour in C, as is the signed negation -x for
		 * INT_MIN.  The unsigned form is well defined and produces
		 * the identical magnitude in all cases.
		 */
		xm = -(unsigned)x;
	} else {
		xm = x;
	}
	xe = SP_MBITS + 3;

	if (xm >> (SP_MBITS + 1 + 3)) {
		/* shunt out overflow bits */
		while (xm >> (SP_MBITS + 1 + 3)) {
			SPXSRSX1();
		}
	} else {
		/* normalize in grs extended single precision */
		while ((xm >> (SP_MBITS + 3)) == 0) {
			xm <<= 1;
			xe--;
		}
	}
	SPNORMRET1(xs, xe, xm, "fint", x);
}

/*
 * Convert an unsigned 32-bit integer to IEEE754 single precision.
 *
 * Values with the top bit set cannot be handed to ieee754sp_fint()
 * directly (they would be interpreted as negative), so the 2^31
 * contribution is split off and added back as a constant.
 */
ieee754sp ieee754sp_funs(unsigned int u)
{
	if ((int) u < 0)
		/* 1U avoids the undefined signed shift (1 << 31). */
		return ieee754sp_add(ieee754sp_1e31(),
				     ieee754sp_fint(u & ~(1U << 31)));
	return ieee754sp_fint(u);
}
gpl-2.0
fredvj/kernel_huawei_u8860
arch/mips/math-emu/sp_fdp.c
10366
2130
/* IEEE754 floating point arithmetic
 * single precision
 */
/*
 * MIPS floating point support
 * Copyright (C) 1994-2000 Algorithmics Ltd.
 *
 * ########################################################################
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 */

#include "ieee754sp.h"

/*
 * Convert an IEEE754 double-precision value to single precision.
 *
 * The operand is classified first (EXPLODEXDP); NaNs, infinities, zeros
 * and doubles too small for single precision are handled as special
 * cases, and only normal values fall through to the rounding path.
 */
ieee754sp ieee754sp_fdp(ieee754dp x)
{
	COMPXDP;
	ieee754sp nan;

	EXPLODEXDP;

	CLEARCX;

	FLUSHXDP;

	switch (xc) {
	case IEEE754_CLASS_SNAN:
		/* Signalling NaN: raise invalid and return the indefinite. */
		SETCX(IEEE754_INVALID_OPERATION);
		return ieee754sp_nanxcpt(ieee754sp_indef(), "fdp");
	case IEEE754_CLASS_QNAN:
		/* Narrow the NaN payload; if the truncated payload no longer
		 * reads as a NaN, substitute the canonical indefinite. */
		nan = buildsp(xs, SP_EMAX + 1 + SP_EBIAS, (u32)
				(xm >> (DP_MBITS - SP_MBITS)));
		if (!ieee754sp_isnan(nan))
			nan = ieee754sp_indef();
		return ieee754sp_nanxcpt(nan, "fdp", x);
	case IEEE754_CLASS_INF:
		return ieee754sp_inf(xs);
	case IEEE754_CLASS_ZERO:
		return ieee754sp_zero(xs);
	case IEEE754_CLASS_DNORM:
		/* can't possibly be sp representable */
		SETCX(IEEE754_UNDERFLOW);
		SETCX(IEEE754_INEXACT);
		/* Round toward the mode-appropriate neighbour: smallest
		 * denormal when rounding away from zero, else zero. */
		if ((ieee754_csr.rm == IEEE754_RU && !xs) ||
				(ieee754_csr.rm == IEEE754_RD && xs))
			return ieee754sp_xcpt(ieee754sp_mind(xs), "fdp", x);
		return ieee754sp_xcpt(ieee754sp_zero(xs), "fdp", x);
	case IEEE754_CLASS_NORM:
		break;
	}

	{
		u32 rm;

		/* convert from DP_MBITS to SP_MBITS+3 with sticky right shift */
		rm = (xm >> (DP_MBITS - (SP_MBITS + 3))) |
		    ((xm << (64 - (DP_MBITS - (SP_MBITS + 3)))) != 0);

		SPNORMRET1(xs, xe, rm, "fdp", x);
	}
}
gpl-2.0
andygross/omap_dmm_tiler
drivers/hid/hid-roccat-koneplus.c
127
21363
/*
 * Roccat Kone[+] driver for Linux
 *
 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

/*
 * Roccat Kone[+] is an updated/improved version of the Kone with more memory
 * and functionality and without the non-standard behaviours the Kone had.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
#include "hid-roccat-koneplus.h"

/* Per-profile indices handed to the profileX_* bin attributes through
 * their ->private pointer. */
static uint profile_numbers[5] = {0, 1, 2, 3, 4};

static struct class *koneplus_class;

/* Remember which of the five profiles the mouse currently uses. */
static void koneplus_profile_activated(struct koneplus_device *koneplus,
		uint new_profile)
{
	koneplus->actual_profile = new_profile;
}

/*
 * Send a control request selecting which profile a subsequent
 * settings/buttons read refers to.  Profile values above 4 are rejected.
 */
static int koneplus_send_control(struct usb_device *usb_dev, uint value,
		enum koneplus_control_requests request)
{
	struct roccat_common2_control control;

	if ((request == KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
			request == KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
			value > 4)
		return -EINVAL;

	control.command = ROCCAT_COMMON_COMMAND_CONTROL;
	control.value = value;
	control.request = request;

	return roccat_common2_send_with_status(usb_dev,
			ROCCAT_COMMON_COMMAND_CONTROL,
			&control, sizeof(struct roccat_common2_control));
}

/* Read the static device info block (firmware version etc.). */
static int koneplus_get_info(struct usb_device *usb_dev,
		struct koneplus_info *buf)
{
	return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_INFO,
			buf, sizeof(struct koneplus_info));
}

/* Select profile `number`, then read its settings block. */
static int koneplus_get_profile_settings(struct usb_device *usb_dev,
		struct koneplus_profile_settings *buf, uint number)
{
	int retval;

	retval = koneplus_send_control(usb_dev, number,
			KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
	if (retval)
		return retval;

	return roccat_common2_receive(usb_dev,
			KONEPLUS_COMMAND_PROFILE_SETTINGS,
			buf, sizeof(struct koneplus_profile_settings));
}

/* Write a full profile-settings block to the device. */
static int koneplus_set_profile_settings(struct usb_device *usb_dev,
		struct koneplus_profile_settings const *settings)
{
	return roccat_common2_send_with_status(usb_dev,
			KONEPLUS_COMMAND_PROFILE_SETTINGS,
			settings, sizeof(struct koneplus_profile_settings));
}

/* Select profile `number`, then read its button assignments. */
static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
		struct koneplus_profile_buttons *buf, int number)
{
	int retval;

	retval = koneplus_send_control(usb_dev, number,
			KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
	if (retval)
		return retval;

	return roccat_common2_receive(usb_dev,
			KONEPLUS_COMMAND_PROFILE_BUTTONS,
			buf, sizeof(struct koneplus_profile_buttons));
}

/* Write a full profile-buttons block to the device. */
static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
		struct koneplus_profile_buttons const *buttons)
{
	return roccat_common2_send_with_status(usb_dev,
			KONEPLUS_COMMAND_PROFILE_BUTTONS,
			buttons, sizeof(struct koneplus_profile_buttons));
}

/* retval is 0-4 on success, < 0 on error */
static int koneplus_get_actual_profile(struct usb_device *usb_dev)
{
	struct koneplus_actual_profile buf;
	int retval;

	retval = roccat_common2_receive(usb_dev,
			KONEPLUS_COMMAND_ACTUAL_PROFILE,
			&buf, sizeof(struct koneplus_actual_profile));

	return retval ? retval : buf.actual_profile;
}

/* Switch the mouse to profile `new_profile`. */
static int koneplus_set_actual_profile(struct usb_device *usb_dev,
		int new_profile)
{
	struct koneplus_actual_profile buf;

	buf.command = KONEPLUS_COMMAND_ACTUAL_PROFILE;
	buf.size = sizeof(struct koneplus_actual_profile);
	buf.actual_profile = new_profile;

	return roccat_common2_send_with_status(usb_dev,
			KONEPLUS_COMMAND_ACTUAL_PROFILE,
			&buf, sizeof(struct koneplus_actual_profile));
}

/*
 * Common sysfs bin-attribute read helper: fetches `real_size` bytes for
 * `command` from the device under the per-device lock.  Only whole-block
 * reads starting at offset 0 are supported.
 */
static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
		char *buf, loff_t off, size_t count,
		size_t real_size, uint command)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval;

	if (off >= real_size)
		return 0;

	if (off != 0 || count != real_size)
		return -EINVAL;

	mutex_lock(&koneplus->koneplus_lock);
	retval = roccat_common2_receive(usb_dev, command, buf, real_size);
	mutex_unlock(&koneplus->koneplus_lock);

	if (retval)
		return retval;

	return real_size;
}

/*
 * Common sysfs bin-attribute write helper: sends the user buffer as a
 * whole block for `command` under the per-device lock.
 */
static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
		void const *buf, loff_t off, size_t count,
		size_t real_size, uint command)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval;

	if (off != 0 || count != real_size)
		return -EINVAL;

	mutex_lock(&koneplus->koneplus_lock);
	retval = roccat_common2_send_with_status(usb_dev, command,
			buf, real_size);
	mutex_unlock(&koneplus->koneplus_lock);

	if (retval)
		return retval;

	return real_size;
}

static ssize_t koneplus_sysfs_write_talk(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return koneplus_sysfs_write(fp, kobj, buf, off, count,
			sizeof(struct koneplus_talk), KONEPLUS_COMMAND_TALK);
}

static ssize_t koneplus_sysfs_write_macro(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return koneplus_sysfs_write(fp, kobj, buf, off, count,
			sizeof(struct koneplus_macro), KONEPLUS_COMMAND_MACRO);
}

static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return koneplus_sysfs_read(fp, kobj, buf, off, count,
			sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
}

static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return koneplus_sysfs_write(fp, kobj, buf, off, count,
			sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
}

static ssize_t koneplus_sysfs_write_tcu(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return koneplus_sysfs_write(fp, kobj, buf, off, count,
			sizeof(struct koneplus_tcu), KONEPLUS_COMMAND_TCU);
}

static ssize_t koneplus_sysfs_read_tcu_image(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	return koneplus_sysfs_read(fp, kobj, buf, off, count,
			sizeof(struct koneplus_tcu_image), KONEPLUS_COMMAND_TCU);
}

/*
 * Read one cached profile's settings; which profile is selected by the
 * uint stored at attr->private.  Partial reads are allowed here since
 * data comes from the in-memory cache, not from the device.
 */
static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));

	if (off >= sizeof(struct koneplus_profile_settings))
		return 0;

	if (off + count > sizeof(struct koneplus_profile_settings))
		count = sizeof(struct koneplus_profile_settings) - off;

	mutex_lock(&koneplus->koneplus_lock);
	memcpy(buf, ((char const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off,
			count);
	mutex_unlock(&koneplus->koneplus_lock);

	return count;
}

/*
 * Write a profile-settings block to the device and, on success, into the
 * cache.  The target profile index is taken from the block itself.
 *
 * NOTE(review): `profile_number` comes straight from the user buffer and
 * indexes profile_settings[] without a bounds check (valid range looks
 * like 0-4 given koneplus_send_control) — confirm whether validation is
 * needed here.
 */
static ssize_t koneplus_sysfs_write_profile_settings(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval = 0;
	int difference;
	int profile_number;
	struct koneplus_profile_settings *profile_settings;

	if (off != 0 || count != sizeof(struct koneplus_profile_settings))
		return -EINVAL;

	profile_number = ((struct koneplus_profile_settings const *)buf)->number;
	profile_settings = &koneplus->profile_settings[profile_number];

	mutex_lock(&koneplus->koneplus_lock);
	difference = memcmp(buf, profile_settings,
			sizeof(struct koneplus_profile_settings));
	if (difference) {
		retval = koneplus_set_profile_settings(usb_dev,
				(struct koneplus_profile_settings const *)buf);
		if (!retval)
			memcpy(profile_settings, buf,
					sizeof(struct koneplus_profile_settings));
	}
	mutex_unlock(&koneplus->koneplus_lock);

	if (retval)
		return retval;

	return sizeof(struct koneplus_profile_settings);
}

/* Read one cached profile's button assignments (profile index in
 * attr->private); partial reads allowed, served from cache. */
static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));

	if (off >= sizeof(struct koneplus_profile_buttons))
		return 0;

	if (off + count > sizeof(struct koneplus_profile_buttons))
		count = sizeof(struct koneplus_profile_buttons) - off;

	mutex_lock(&koneplus->koneplus_lock);
	memcpy(buf, ((char const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off,
			count);
	mutex_unlock(&koneplus->koneplus_lock);

	return count;
}

/*
 * Write a profile-buttons block to the device and, on success, into the
 * cache.  The target profile index is taken from the block itself.
 *
 * NOTE(review): as with the settings writer, `profile_number` is
 * user-controlled and used unchecked as an array index — verify.
 */
static ssize_t koneplus_sysfs_write_profile_buttons(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr, char *buf,
		loff_t off, size_t count)
{
	struct device *dev =
			container_of(kobj, struct device, kobj)->parent->parent;
	struct koneplus_device *koneplus = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval = 0;
	int difference;
	uint profile_number;
	struct koneplus_profile_buttons *profile_buttons;

	if (off != 0 || count != sizeof(struct koneplus_profile_buttons))
		return -EINVAL;

	profile_number = ((struct koneplus_profile_buttons const *)buf)->number;
	profile_buttons = &koneplus->profile_buttons[profile_number];

	mutex_lock(&koneplus->koneplus_lock);
	difference = memcmp(buf, profile_buttons,
			sizeof(struct koneplus_profile_buttons));
	if (difference) {
		retval = koneplus_set_profile_buttons(usb_dev,
				(struct koneplus_profile_buttons const *)buf);
		if (!retval)
			memcpy(profile_buttons, buf,
					sizeof(struct koneplus_profile_buttons));
	}
	mutex_unlock(&koneplus->koneplus_lock);

	if (retval)
		return retval;

	return sizeof(struct koneplus_profile_buttons);
}

/* Show the cached active profile number (0-4). */
static ssize_t koneplus_sysfs_show_actual_profile(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct koneplus_device *koneplus =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->actual_profile);
}

/*
 * Parse a profile number (0-4), switch the mouse to it, update the cache
 * and broadcast the change to chardev listeners.
 */
static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
		struct device_attribute *attr, char const *buf, size_t size)
{
	struct koneplus_device *koneplus;
	struct usb_device *usb_dev;
	unsigned long profile;
	int retval;
	struct koneplus_roccat_report roccat_report;

	dev = dev->parent->parent;
	koneplus = hid_get_drvdata(dev_get_drvdata(dev));
	usb_dev = interface_to_usbdev(to_usb_interface(dev));

	retval = strict_strtoul(buf, 10, &profile);
	if (retval)
		return retval;

	if (profile > 4)
		return -EINVAL;

	mutex_lock(&koneplus->koneplus_lock);

	retval = koneplus_set_actual_profile(usb_dev, profile);
	if (retval) {
		mutex_unlock(&koneplus->koneplus_lock);
		return retval;
	}

	koneplus_profile_activated(koneplus, profile);

	/* Userspace sees profiles as 1-based in roccat events. */
	roccat_report.type = KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE;
	roccat_report.data1 = profile + 1;
	roccat_report.data2 = 0;
	roccat_report.profile = profile + 1;
	roccat_report_event(koneplus->chrdev_minor,
			(uint8_t const *)&roccat_report);

	mutex_unlock(&koneplus->koneplus_lock);

	return size;
}

static ssize_t koneplus_sysfs_show_firmware_version(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct koneplus_device *koneplus =
			hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
	return snprintf(buf, PAGE_SIZE, "%d\n", koneplus->info.firmware_version);
}

static struct device_attribute koneplus_attributes[] = {
	__ATTR(actual_profile, 0660,
			koneplus_sysfs_show_actual_profile,
			koneplus_sysfs_set_actual_profile),
	/* startup_profile is an alias of actual_profile. */
	__ATTR(startup_profile, 0660,
			koneplus_sysfs_show_actual_profile,
			koneplus_sysfs_set_actual_profile),
	__ATTR(firmware_version, 0440,
			koneplus_sysfs_show_firmware_version, NULL),
	__ATTR_NULL
};

static struct bin_attribute koneplus_bin_attributes[] = {
	{
		.attr = { .name = "sensor", .mode = 0660 },
		.size = sizeof(struct koneplus_sensor),
		.read = koneplus_sysfs_read_sensor,
		.write = koneplus_sysfs_write_sensor
	},
	{
		.attr = { .name = "tcu", .mode = 0220 },
		.size = sizeof(struct koneplus_tcu),
		.write = koneplus_sysfs_write_tcu
	},
	{
		.attr = { .name = "tcu_image", .mode = 0440 },
		.size = sizeof(struct koneplus_tcu_image),
		.read = koneplus_sysfs_read_tcu_image
	},
	{
		.attr = { .name = "profile_settings", .mode = 0220 },
		.size = sizeof(struct koneplus_profile_settings),
		.write = koneplus_sysfs_write_profile_settings
	},
	{
		.attr = { .name = "profile1_settings", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_settings),
		.read = koneplus_sysfs_read_profilex_settings,
		.private = &profile_numbers[0]
	},
	{
		.attr = { .name = "profile2_settings", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_settings),
		.read = koneplus_sysfs_read_profilex_settings,
		.private = &profile_numbers[1]
	},
	{
		.attr = { .name = "profile3_settings", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_settings),
		.read = koneplus_sysfs_read_profilex_settings,
		.private = &profile_numbers[2]
	},
	{
		.attr = { .name = "profile4_settings", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_settings),
		.read = koneplus_sysfs_read_profilex_settings,
		.private = &profile_numbers[3]
	},
	{
		.attr = { .name = "profile5_settings", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_settings),
		.read = koneplus_sysfs_read_profilex_settings,
		.private = &profile_numbers[4]
	},
	{
		.attr = { .name = "profile_buttons", .mode = 0220 },
		.size = sizeof(struct koneplus_profile_buttons),
		.write = koneplus_sysfs_write_profile_buttons
	},
	{
		.attr = { .name = "profile1_buttons", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_buttons),
		.read = koneplus_sysfs_read_profilex_buttons,
		.private = &profile_numbers[0]
	},
	{
		.attr = { .name = "profile2_buttons", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_buttons),
		.read = koneplus_sysfs_read_profilex_buttons,
		.private = &profile_numbers[1]
	},
	{
		.attr = { .name = "profile3_buttons", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_buttons),
		.read = koneplus_sysfs_read_profilex_buttons,
		.private = &profile_numbers[2]
	},
	{
		.attr = { .name = "profile4_buttons", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_buttons),
		.read = koneplus_sysfs_read_profilex_buttons,
		.private = &profile_numbers[3]
	},
	{
		.attr = { .name = "profile5_buttons", .mode = 0440 },
		.size = sizeof(struct koneplus_profile_buttons),
		.read = koneplus_sysfs_read_profilex_buttons,
		.private = &profile_numbers[4]
	},
	{
		.attr = { .name = "macro", .mode = 0220 },
		.size = sizeof(struct koneplus_macro),
		.write = koneplus_sysfs_write_macro
	},
	{
		.attr = { .name = "talk", .mode = 0220 },
		.size = sizeof(struct koneplus_talk),
		.write = koneplus_sysfs_write_talk
	},
	__ATTR_NULL
};

/*
 * Populate the driver state: device info, all five cached profiles and
 * the active profile.  The msleep(wait) pacing between requests matches
 * what the hardware appears to need — TODO confirm against vendor docs.
 */
static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev,
		struct koneplus_device *koneplus)
{
	int retval, i;
	static uint wait = 200;

	mutex_init(&koneplus->koneplus_lock);

	retval = koneplus_get_info(usb_dev, &koneplus->info);
	if (retval)
		return retval;

	for (i = 0; i < 5; ++i) {
		msleep(wait);
		retval = koneplus_get_profile_settings(usb_dev,
				&koneplus->profile_settings[i], i);
		if (retval)
			return retval;

		msleep(wait);
		retval = koneplus_get_profile_buttons(usb_dev,
				&koneplus->profile_buttons[i], i);
		if (retval)
			return retval;
	}

	msleep(wait);
	retval = koneplus_get_actual_profile(usb_dev);
	if (retval < 0)
		return retval;
	koneplus_profile_activated(koneplus, retval);

	return 0;
}

/*
 * Attach driver state and the roccat chardev to the mouse interface only;
 * other interfaces get NULL drvdata so raw_event ignores them.  A chardev
 * connect failure is non-fatal.
 */
static int koneplus_init_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct koneplus_device *koneplus;
	int retval;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			== USB_INTERFACE_PROTOCOL_MOUSE) {

		koneplus = kzalloc(sizeof(*koneplus), GFP_KERNEL);
		if (!koneplus) {
			hid_err(hdev, "can't alloc device descriptor\n");
			return -ENOMEM;
		}
		hid_set_drvdata(hdev, koneplus);

		retval = koneplus_init_koneplus_device_struct(usb_dev, koneplus);
		if (retval) {
			hid_err(hdev, "couldn't init struct koneplus_device\n");
			goto exit_free;
		}

		retval = roccat_connect(koneplus_class, hdev,
				sizeof(struct koneplus_roccat_report));
		if (retval < 0) {
			hid_err(hdev, "couldn't init char dev\n");
		} else {
			koneplus->chrdev_minor = retval;
			koneplus->roccat_claimed = 1;
		}
	} else {
		hid_set_drvdata(hdev, NULL);
	}

	return 0;
exit_free:
	kfree(koneplus);
	return retval;
}

/* Undo koneplus_init_specials for the mouse interface. */
static void koneplus_remove_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct koneplus_device *koneplus;

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			== USB_INTERFACE_PROTOCOL_MOUSE) {
		koneplus = hid_get_drvdata(hdev);
		if (koneplus->roccat_claimed)
			roccat_disconnect(koneplus->chrdev_minor);
		kfree(koneplus);
	}
}

/* Standard HID probe: parse, start HW, then set up the roccat extras. */
static int koneplus_probe(struct hid_device *hdev,
		const struct hid_device_id *id)
{
	int retval;

	retval = hid_parse(hdev);
	if (retval) {
		hid_err(hdev, "parse failed\n");
		goto exit;
	}

	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (retval) {
		hid_err(hdev, "hw start failed\n");
		goto exit;
	}

	retval = koneplus_init_specials(hdev);
	if (retval) {
		hid_err(hdev, "couldn't install mouse\n");
		goto exit_stop;
	}

	return 0;

exit_stop:
	hid_hw_stop(hdev);
exit:
	return retval;
}

static void koneplus_remove(struct hid_device *hdev)
{
	koneplus_remove_specials(hdev);
	hid_hw_stop(hdev);
}

/* Track profile switches the user performs on the mouse itself, so the
 * cached actual_profile stays in sync (device reports 1-based). */
static void koneplus_keep_values_up_to_date(struct koneplus_device *koneplus,
		u8 const *data)
{
	struct koneplus_mouse_report_button const *button_report;

	switch (data[0]) {
	case KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON:
		button_report = (struct koneplus_mouse_report_button const *)data;
		switch (button_report->type) {
		case KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE:
			koneplus_profile_activated(koneplus,
					button_report->data1 - 1);
			break;
		}
		break;
	}
}

/* Forward button events to the roccat chardev, suppressing the release
 * half of quicklaunch/timer events. */
static void koneplus_report_to_chrdev(struct koneplus_device const *koneplus,
		u8 const *data)
{
	struct koneplus_roccat_report roccat_report;
	struct koneplus_mouse_report_button const *button_report;

	if (data[0] != KONEPLUS_MOUSE_REPORT_NUMBER_BUTTON)
		return;

	button_report = (struct koneplus_mouse_report_button const *)data;

	if ((button_report->type == KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH ||
			button_report->type == KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER) &&
			button_report->data2 != KONEPLUS_MOUSE_REPORT_BUTTON_ACTION_PRESS)
		return;

	roccat_report.type = button_report->type;
	roccat_report.data1 = button_report->data1;
	roccat_report.data2 = button_report->data2;
	roccat_report.profile = koneplus->actual_profile + 1;
	roccat_report_event(koneplus->chrdev_minor,
			(uint8_t const *)&roccat_report);
}

/* Raw-event hook: only the mouse interface carries roccat reports. */
static int koneplus_raw_event(struct hid_device *hdev,
		struct hid_report *report, u8 *data, int size)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct koneplus_device *koneplus = hid_get_drvdata(hdev);

	if (intf->cur_altsetting->desc.bInterfaceProtocol
			!= USB_INTERFACE_PROTOCOL_MOUSE)
		return 0;

	if (koneplus == NULL)
		return 0;

	koneplus_keep_values_up_to_date(koneplus, data);

	if (koneplus->roccat_claimed)
		koneplus_report_to_chrdev(koneplus, data);

	return 0;
}

static const struct hid_device_id koneplus_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
	{ }
};

MODULE_DEVICE_TABLE(hid, koneplus_devices);

static struct hid_driver koneplus_driver = {
		.name = "koneplus",
		.id_table = koneplus_devices,
		.probe = koneplus_probe,
		.remove = koneplus_remove,
		.raw_event = koneplus_raw_event
};

static int __init koneplus_init(void)
{
	int retval;

	/* class name has to be same as driver name */
	koneplus_class = class_create(THIS_MODULE, "koneplus");
	if (IS_ERR(koneplus_class))
		return PTR_ERR(koneplus_class);
	koneplus_class->dev_attrs = koneplus_attributes;
	koneplus_class->dev_bin_attrs = koneplus_bin_attributes;

	retval = hid_register_driver(&koneplus_driver);
	if (retval)
		class_destroy(koneplus_class);
	return retval;
}

static void __exit koneplus_exit(void)
{
	hid_unregister_driver(&koneplus_driver);
	class_destroy(koneplus_class);
}

module_init(koneplus_init);
module_exit(koneplus_exit);

MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Kone[+] driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
agayev/linux
drivers/mfd/max77843.c
127
6182
/*
 * MFD core driver for the Maxim MAX77843
 *
 * Copyright (C) 2015 Samsung Electronics
 * Author: Jaewon Kim <jaewon02.kim@samsung.com>
 * Author: Beomho Seo <beomho.seo@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Sub-function devices instantiated by mfd_add_devices(). */
static const struct mfd_cell max77843_devs[] = {
	{
		.name = "max77843-muic",
		.of_compatible = "maxim,max77843-muic",
	}, {
		.name = "max77843-regulator",
		.of_compatible = "maxim,max77843-regulator",
	}, {
		.name = "max77843-charger",
		.of_compatible = "maxim,max77843-charger"
	}, {
		.name = "max77843-fuelgauge",
		.of_compatible = "maxim,max77843-fuelgauge",
	}, {
		.name = "max77843-haptic",
		.of_compatible = "maxim,max77843-haptic",
	},
};

static const struct regmap_config max77843_charger_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = MAX77843_CHG_REG_END,
};

static const struct regmap_config max77843_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = MAX77843_SYS_REG_END,
};

static const struct regmap_irq max77843_irqs[] = {
	/* TOPSYS interrupts */
	{ .reg_offset = 0, .mask = MAX77843_SYS_IRQ_SYSUVLO_INT, },
	{ .reg_offset = 0, .mask = MAX77843_SYS_IRQ_SYSOVLO_INT, },
	{ .reg_offset = 0, .mask = MAX77843_SYS_IRQ_TSHDN_INT, },
	{ .reg_offset = 0, .mask = MAX77843_SYS_IRQ_TM_INT, },
};

static const struct regmap_irq_chip max77843_irq_chip = {
	.name = "max77843",
	.status_base = MAX77843_SYS_REG_SYSINTSRC,
	.mask_base = MAX77843_SYS_REG_SYSINTMASK,
	.mask_invert = false,
	.num_regs = 1,
	.irqs = max77843_irqs,
	.num_irqs = ARRAY_SIZE(max77843_irqs),
};

/* Charger and Charger regulator use same regmap. */
static int max77843_chg_init(struct max77693_dev *max77843)
{
	int ret;

	/* The charger block answers at its own I2C address, so a dummy
	 * client is created on the same adapter. */
	max77843->i2c_chg = i2c_new_dummy(max77843->i2c->adapter, I2C_ADDR_CHG);
	if (!max77843->i2c_chg) {
		dev_err(&max77843->i2c->dev,
				"Cannot allocate I2C device for Charger\n");
		return -ENODEV;
	}
	i2c_set_clientdata(max77843->i2c_chg, max77843);

	max77843->regmap_chg = devm_regmap_init_i2c(max77843->i2c_chg,
			&max77843_charger_regmap_config);
	if (IS_ERR(max77843->regmap_chg)) {
		ret = PTR_ERR(max77843->regmap_chg);
		goto err_chg_i2c;
	}

	return 0;

err_chg_i2c:
	/* The dummy client is not devm-managed; unregister it by hand. */
	i2c_unregister_device(max77843->i2c_chg);
	return ret;
}

/*
 * Probe: map the TOPSYS register block, install the IRQ chip, verify the
 * PMIC ID, bring up the charger regmap, unmask interrupt sources and
 * register the MFD children.  All failures after the IRQ chip is added
 * unwind through err_pmic_id.
 */
static int max77843_probe(struct i2c_client *i2c,
				const struct i2c_device_id *id)
{
	struct max77693_dev *max77843;
	unsigned int reg_data;
	int ret;

	max77843 = devm_kzalloc(&i2c->dev, sizeof(*max77843), GFP_KERNEL);
	if (!max77843)
		return -ENOMEM;

	i2c_set_clientdata(i2c, max77843);
	max77843->dev = &i2c->dev;
	max77843->i2c = i2c;
	max77843->irq = i2c->irq;
	max77843->type = id->driver_data;

	max77843->regmap = devm_regmap_init_i2c(i2c,
			&max77843_regmap_config);
	if (IS_ERR(max77843->regmap)) {
		dev_err(&i2c->dev, "Failed to allocate topsys register map\n");
		return PTR_ERR(max77843->regmap);
	}

	ret = regmap_add_irq_chip(max77843->regmap, max77843->irq,
			IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
			0, &max77843_irq_chip, &max77843->irq_data_topsys);
	if (ret) {
		dev_err(&i2c->dev, "Failed to add TOPSYS IRQ chip\n");
		return ret;
	}

	ret = regmap_read(max77843->regmap,
			MAX77843_SYS_REG_PMICID, &reg_data);
	if (ret < 0) {
		dev_err(&i2c->dev, "Failed to read PMIC ID\n");
		goto err_pmic_id;
	}
	dev_info(&i2c->dev, "device ID: 0x%x\n", reg_data);

	ret = max77843_chg_init(max77843);
	if (ret) {
		dev_err(&i2c->dev, "Failed to init Charger\n");
		goto err_pmic_id;
	}

	/* Clear the mask bits so all interrupt sources are delivered. */
	ret = regmap_update_bits(max77843->regmap,
			MAX77843_SYS_REG_INTSRCMASK,
			MAX77843_INTSRC_MASK_MASK,
			(unsigned int)~MAX77843_INTSRC_MASK_MASK);
	if (ret < 0) {
		dev_err(&i2c->dev, "Failed to unmask interrupt source\n");
		goto err_pmic_id;
	}

	ret = mfd_add_devices(max77843->dev, -1, max77843_devs,
			ARRAY_SIZE(max77843_devs), NULL, 0, NULL);
	if (ret < 0) {
		dev_err(&i2c->dev, "Failed to add mfd device\n");
		goto err_pmic_id;
	}

	device_init_wakeup(max77843->dev, true);

	return 0;

err_pmic_id:
	regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);

	return ret;
}

static int max77843_remove(struct i2c_client *i2c)
{
	struct max77693_dev *max77843 = i2c_get_clientdata(i2c);

	mfd_remove_devices(max77843->dev);

	regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);

	i2c_unregister_device(max77843->i2c_chg);

	return 0;
}

static const struct of_device_id max77843_dt_match[] = {
	{ .compatible = "maxim,max77843", },
	{ },
};

static const struct i2c_device_id max77843_id[] = {
	{ "max77843", TYPE_MAX77843, },
	{ },
};
MODULE_DEVICE_TABLE(i2c, max77843_id);

/* Suspend: quiesce the IRQ, arming it as a wakeup source if configured. */
static int __maybe_unused max77843_suspend(struct device *dev)
{
	struct i2c_client *i2c = to_i2c_client(dev);
	struct max77693_dev *max77843 = i2c_get_clientdata(i2c);

	disable_irq(max77843->irq);
	if (device_may_wakeup(dev))
		enable_irq_wake(max77843->irq);

	return 0;
}

static int __maybe_unused max77843_resume(struct device *dev)
{
	struct i2c_client *i2c = to_i2c_client(dev);
	struct max77693_dev *max77843 = i2c_get_clientdata(i2c);

	if (device_may_wakeup(dev))
		disable_irq_wake(max77843->irq);
	enable_irq(max77843->irq);

	return 0;
}

static SIMPLE_DEV_PM_OPS(max77843_pm, max77843_suspend, max77843_resume);

static struct i2c_driver max77843_i2c_driver = {
	.driver	= {
		.name = "max77843",
		.pm = &max77843_pm,
		.of_match_table = max77843_dt_match,
	},
	.probe = max77843_probe,
	.remove = max77843_remove,
	.id_table = max77843_id,
};

static int __init max77843_i2c_init(void)
{
	return i2c_add_driver(&max77843_i2c_driver);
}
/* Registered early so dependent subsystems find the MFD at boot. */
subsys_initcall(max77843_i2c_init);

static void __exit max77843_i2c_exit(void)
{
	i2c_del_driver(&max77843_i2c_driver);
}
module_exit(max77843_i2c_exit);
gpl-2.0
civato/SphereKat-Note8.0-KitKat
arch/arm/mach-exynos/kona-sensor.c
127
6032
#include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/i2c-gpio.h> #include <linux/delay.h> #include <plat/gpio-cfg.h> #include <plat/iic.h> #include <plat/devs.h> #include <mach/regs-gpio.h> #include <mach/gpio.h> #include <mach/gpio-rev00-kona.h> #include <linux/regulator/consumer.h> #include <linux/err.h> #include "midas.h" #include <linux/sensor/sensors_core.h> #include <linux/sensor/yas.h> #include <linux/sensor/gp2a.h> #include <mach/kona-sensor.h> #if defined(CONFIG_SENSORS_BMA254) || defined(CONFIG_SENSORS_K3DH) static int accel_gpio_init(void) { int ret = gpio_request(GPIO_ACC_INT, "accelerometer_irq"); pr_info("%s\n", __func__); if (ret) { pr_err("%s, Failed to request gpio accelerometer_irq(%d)\n", __func__, ret); return ret; } s3c_gpio_cfgpin(GPIO_ACC_INT, S3C_GPIO_INPUT); gpio_set_value(GPIO_ACC_INT, 2); s3c_gpio_setpull(GPIO_ACC_INT, S3C_GPIO_PULL_NONE); s5p_gpio_set_drvstr(GPIO_ACC_INT, S5P_GPIO_DRVSTR_LV1); return ret; } static u8 acceleromter_get_position(void) { int position = 0; #if defined(CONFIG_TARGET_LOCALE_USA) if (system_rev >= 3) position = 4; else if (system_rev >= 1) position = 3; else position = 4; #elif defined(CONFIG_MACH_KONA_EUR_LTE) if (system_rev >= 3) position = 4; else if (system_rev >= 1) position = 3; else position = 4; #elif defined(CONFIG_MACH_KONA) if (system_rev >= 1) position = 4; else position = 4; #else position = 4; #endif return position; } static struct accel_platform_data accel_pdata = { .accel_get_position = acceleromter_get_position, .axis_adjust = true, }; #if defined(CONFIG_SENSORS_BMA254) static struct i2c_board_info i2c_devs1_1[] __initdata = { { I2C_BOARD_INFO("bma254", 0x18), .platform_data = &accel_pdata, .irq = IRQ_EINT(0), }, }; #endif #if defined(CONFIG_SENSORS_K3DH) static struct i2c_board_info i2c_devs1[] __initdata = { { I2C_BOARD_INFO("k3dh", 0x19), .platform_data = &accel_pdata, .irq = IRQ_EINT(0), }, }; #endif #endif #ifdef CONFIG_SENSORS_YAS532 static 
struct i2c_gpio_platform_data gpio_i2c_data10 = { .sda_pin = GPIO_MSENSOR_SDA_18V, .scl_pin = GPIO_MSENSOR_SCL_18V, }; struct platform_device s3c_device_i2c10 = { .name = "i2c-gpio", .id = 10, .dev.platform_data = &gpio_i2c_data10, }; static struct mag_platform_data magnetic_pdata = { .offset_enable = 0, .chg_status = CABLE_TYPE_NONE, .ta_offset.v = {0, 0, 0}, .usb_offset.v = {0, 0, 0}, .full_offset.v = {0, 0, 0}, }; static struct i2c_board_info i2c_devs10_emul[] __initdata = { { I2C_BOARD_INFO("yas532", 0x2e), .platform_data = &magnetic_pdata, }, }; #endif #ifdef CONFIG_SENSORS_GP2A static int proximity_leda_on(bool onoff) { pr_info("%s, onoff = %d\n", __func__, onoff); gpio_set_value(GPIO_PS_ALS_EN, onoff); return 0; } static int optical_gpio_init(void) { int ret = gpio_request(GPIO_PS_ALS_EN, "optical_power_supply_on"); pr_info("%s\n", __func__); if (ret) { pr_err("%s, Failed to request gpio optical power supply(%d)\n", __func__, ret); return ret; } /* configuring for gp2a gpio for LEDA power */ s3c_gpio_cfgpin(GPIO_PS_ALS_EN, S3C_GPIO_OUTPUT); gpio_set_value(GPIO_PS_ALS_EN, 0); s3c_gpio_setpull(GPIO_PS_ALS_EN, S3C_GPIO_PULL_NONE); return ret; } static unsigned long gp2a_get_threshold(u8 *thesh_diff) { u8 threshold = 0x09; if (thesh_diff) *thesh_diff = 1; if (thesh_diff) pr_info("%s, threshold low = 0x%x, high = 0x%x\n", __func__, threshold, (threshold + *thesh_diff)); else pr_info("%s, threshold = 0x%x\n", __func__, threshold); return threshold; } static struct gp2a_platform_data gp2a_pdata = { .gp2a_led_on = proximity_leda_on, .p_out = GPIO_PS_ALS_INT, .gp2a_get_threshold = gp2a_get_threshold, }; static struct platform_device opt_gp2a = { .name = "gp2a-opt", .id = -1, .dev = { .platform_data = &gp2a_pdata, }, }; static struct platform_device light_gp2a = { .name = "light_sensor", .id = -1, }; #endif #if defined(CONFIG_SENSORS_GP2A) || defined(CONFIG_SENSORS_AL3201) static struct i2c_gpio_platform_data gpio_i2c_data12 = { .sda_pin = GPIO_PS_ALS_SDA_28V, 
.scl_pin = GPIO_PS_ALS_SCL_28V, }; struct platform_device s3c_device_i2c12 = { .name = "i2c-gpio", .id = 12, .dev.platform_data = &gpio_i2c_data12, }; static struct i2c_board_info i2c_devs12_emul[] __initdata = { #if defined(CONFIG_SENSORS_AL3201) {I2C_BOARD_INFO("AL3201", 0x1c),}, #endif #if defined(CONFIG_SENSORS_GP2A) {I2C_BOARD_INFO("gp2a", 0x39),}, #endif }; #endif static struct platform_device *kona_sensor_devices[] __initdata = { #if defined(CONFIG_SENSORS_BMA254) || defined(CONFIG_SENSORS_K3DH) &s3c_device_i2c1, #endif #ifdef CONFIG_SENSORS_YAS532 &s3c_device_i2c10, #endif #if defined(CONFIG_SENSORS_GP2A) || defined(CONFIG_SENSORS_AL3201) &s3c_device_i2c12, #endif }; int kona_sensor_init(void) { int ret = 0; /* accelerometer sensor */ pr_info("%s, is called\n", __func__); #if defined(CONFIG_SENSORS_BMA254) || defined(CONFIG_SENSORS_K3DH) s3c_i2c1_set_platdata(NULL); i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1)); #endif #ifdef CONFIG_SENSORS_YAS532 /* magnetic sensor */ i2c_register_board_info(10, i2c_devs10_emul, ARRAY_SIZE(i2c_devs10_emul)); #endif #ifdef CONFIG_SENSORS_GP2A /* optical sensor */ ret = optical_gpio_init(); if (ret < 0) pr_err("%s, optical_gpio_init fail(err=%d)\n", __func__, ret); i2c_register_board_info(12, i2c_devs12_emul, ARRAY_SIZE(i2c_devs12_emul)); ret = platform_device_register(&opt_gp2a); if (ret < 0) { pr_err("%s, failed to register opt_gp2a(err=%d)\n", __func__, ret); return ret; } ret = platform_device_register(&light_gp2a); if (ret < 0) { pr_err("%s, failed to register light_gp2a(err=%d)\n", __func__, ret); return ret; } #elif defined(CONFIG_SENSORS_AL3201) i2c_register_board_info(12, i2c_devs12_emul, ARRAY_SIZE(i2c_devs12_emul)); #endif platform_add_devices(kona_sensor_devices, ARRAY_SIZE(kona_sensor_devices)); return ret; }
gpl-2.0
Hardslog/grimlock_kernel_asus_tegra3_unified
kernel/trace/trace_irqsoff.c
383
15664
/* * trace irqs off critical timings * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * From code in the latency_tracer, that is: * * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 William Lee Irwin III */ #include <linux/kallsyms.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> #include <linux/fs.h> #include "trace.h" static struct trace_array *irqsoff_trace __read_mostly; static int tracer_enabled __read_mostly; static DEFINE_PER_CPU(int, tracing_cpu); static DEFINE_SPINLOCK(max_trace_lock); enum { TRACER_IRQS_OFF = (1 << 1), TRACER_PREEMPT_OFF = (1 << 2), }; static int trace_type __read_mostly; static int save_lat_flag; static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph); #ifdef CONFIG_PREEMPT_TRACER static inline int preempt_trace(void) { return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count()); } #else # define preempt_trace() (0) #endif #ifdef CONFIG_IRQSOFF_TRACER static inline int irq_trace(void) { return ((trace_type & TRACER_IRQS_OFF) && irqs_disabled()); } #else # define irq_trace() (0) #endif #define TRACE_DISPLAY_GRAPH 1 static struct tracer_opt trace_opts[] = { #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* display latency trace as call graph */ { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, #endif { } /* Empty entry */ }; static struct tracer_flags tracer_flags = { .val = 0, .opts = trace_opts, }; #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) /* * Sequence count - we record it when starting a measurement and * skip the latency if the sequence has changed - some other section * did a maximum and could disturb our measurement with serial console * printouts, etc. 
Truly coinciding maximum latencies should be rare * and what happens together happens separately as well, so this doesn't * decrease the validity of the maximum found: */ static __cacheline_aligned_in_smp unsigned long max_sequence; #ifdef CONFIG_FUNCTION_TRACER /* * Prologue for the preempt and irqs off function tracers. * * Returns 1 if it is OK to continue, and data->disabled is * incremented. * 0 if the trace is to be ignored, and data->disabled * is kept the same. * * Note, this function is also used outside this ifdef but * inside the #ifdef of the function graph tracer below. * This is OK, since the function graph tracer is * dependent on the function tracer. */ static int func_prolog_dec(struct trace_array *tr, struct trace_array_cpu **data, unsigned long *flags) { long disabled; int cpu; /* * Does not matter if we preempt. We test the flags * afterward, to see if irqs are disabled or not. * If we preempt and get a false positive, the flags * test will fail. */ cpu = raw_smp_processor_id(); if (likely(!per_cpu(tracing_cpu, cpu))) return 0; local_save_flags(*flags); /* slight chance to get a false positive on tracing_cpu */ if (!irqs_disabled_flags(*flags)) return 0; *data = tr->data[cpu]; disabled = atomic_inc_return(&(*data)->disabled); if (likely(disabled == 1)) return 1; atomic_dec(&(*data)->disabled); return 0; } /* * irqsoff uses its own tracer function to keep the overhead down: */ static void irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; if (!func_prolog_dec(tr, &data, &flags)) return; trace_function(tr, ip, parent_ip, flags, preempt_count()); atomic_dec(&data->disabled); } static struct ftrace_ops trace_ops __read_mostly = { .func = irqsoff_tracer_call, .flags = FTRACE_OPS_FL_GLOBAL, }; #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) { int cpu; if (!(bit & 
TRACE_DISPLAY_GRAPH)) return -EINVAL; if (!(is_graph() ^ set)) return 0; stop_irqsoff_tracer(irqsoff_trace, !set); for_each_possible_cpu(cpu) per_cpu(tracing_cpu, cpu) = 0; tracing_max_latency = 0; tracing_reset_online_cpus(irqsoff_trace); return start_irqsoff_tracer(irqsoff_trace, set); } static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; int ret; int pc; if (!func_prolog_dec(tr, &data, &flags)) return 0; pc = preempt_count(); ret = __trace_graph_entry(tr, trace, flags, pc); atomic_dec(&data->disabled); return ret; } static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; int pc; if (!func_prolog_dec(tr, &data, &flags)) return; pc = preempt_count(); __trace_graph_return(tr, trace, flags, pc); atomic_dec(&data->disabled); } static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph()) graph_trace_open(iter); } static void irqsoff_trace_close(struct trace_iterator *iter) { if (iter->private) graph_trace_close(iter); } #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \ TRACE_GRAPH_PRINT_PROC | \ TRACE_GRAPH_PRINT_ABS_TIME | \ TRACE_GRAPH_PRINT_DURATION) static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) { /* * In graph mode call the graph tracer output function, * otherwise go with the TRACE_FN event handler */ if (is_graph()) return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); return TRACE_TYPE_UNHANDLED; } static void irqsoff_print_header(struct seq_file *s) { if (is_graph()) print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); else trace_default_header(s); } static void __trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { if (is_graph()) trace_graph_function(tr, ip, parent_ip, flags, pc); else trace_function(tr, ip, parent_ip, flags, pc); } #else #define 
__trace_function trace_function static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) { return -EINVAL; } static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) { return -1; } static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) { return TRACE_TYPE_UNHANDLED; } static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } static void irqsoff_print_header(struct seq_file *s) { } static void irqsoff_trace_open(struct trace_iterator *iter) { } static void irqsoff_trace_close(struct trace_iterator *iter) { } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ /* * Should this new latency be reported/recorded? */ static int report_latency(cycle_t delta) { if (tracing_thresh) { if (delta < tracing_thresh) return 0; } else { if (delta <= tracing_max_latency) return 0; } return 1; } static void check_critical_timing(struct trace_array *tr, struct trace_array_cpu *data, unsigned long parent_ip, int cpu) { cycle_t T0, T1, delta; unsigned long flags; int pc; T0 = data->preempt_timestamp; T1 = ftrace_now(cpu); delta = T1-T0; local_save_flags(flags); pc = preempt_count(); if (!report_latency(delta)) goto out; spin_lock_irqsave(&max_trace_lock, flags); /* check if we are still the max latency */ if (!report_latency(delta)) goto out_unlock; __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); /* Skip 5 functions to get to the irq/preempt enable function */ __trace_stack(tr, flags, 5, pc); if (data->critical_sequence != max_sequence) goto out_unlock; data->critical_end = parent_ip; if (likely(!is_tracing_stopped())) { tracing_max_latency = delta; update_max_tr_single(tr, current, cpu); } max_sequence++; out_unlock: spin_unlock_irqrestore(&max_trace_lock, flags); out: data->critical_sequence = max_sequence; data->preempt_timestamp = ftrace_now(cpu); __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); } static inline void start_critical_timing(unsigned long ip, unsigned long parent_ip) { int cpu; struct trace_array *tr = irqsoff_trace; 
struct trace_array_cpu *data; unsigned long flags; if (likely(!tracer_enabled)) return; cpu = raw_smp_processor_id(); if (per_cpu(tracing_cpu, cpu)) return; data = tr->data[cpu]; if (unlikely(!data) || atomic_read(&data->disabled)) return; atomic_inc(&data->disabled); data->critical_sequence = max_sequence; data->preempt_timestamp = ftrace_now(cpu); data->critical_start = parent_ip ? : ip; local_save_flags(flags); __trace_function(tr, ip, parent_ip, flags, preempt_count()); per_cpu(tracing_cpu, cpu) = 1; atomic_dec(&data->disabled); } static inline void stop_critical_timing(unsigned long ip, unsigned long parent_ip) { int cpu; struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; cpu = raw_smp_processor_id(); /* Always clear the tracing cpu on stopping the trace */ if (unlikely(per_cpu(tracing_cpu, cpu))) per_cpu(tracing_cpu, cpu) = 0; else return; if (!tracer_enabled) return; data = tr->data[cpu]; if (unlikely(!data) || !data->critical_start || atomic_read(&data->disabled)) return; atomic_inc(&data->disabled); local_save_flags(flags); __trace_function(tr, ip, parent_ip, flags, preempt_count()); check_critical_timing(tr, data, parent_ip ? 
: ip, cpu); data->critical_start = 0; atomic_dec(&data->disabled); } /* start and stop critical timings used to for stoppage (in idle) */ void start_critical_timings(void) { if (preempt_trace() || irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL_GPL(start_critical_timings); void stop_critical_timings(void) { if (preempt_trace() || irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL_GPL(stop_critical_timings); #ifdef CONFIG_IRQSOFF_TRACER #ifdef CONFIG_PROVE_LOCKING void time_hardirqs_on(unsigned long a0, unsigned long a1) { if (!preempt_trace() && irq_trace()) stop_critical_timing(a0, a1); } void time_hardirqs_off(unsigned long a0, unsigned long a1) { if (!preempt_trace() && irq_trace()) start_critical_timing(a0, a1); } #else /* !CONFIG_PROVE_LOCKING */ /* * Stubs: */ void trace_softirqs_on(unsigned long ip) { } void trace_softirqs_off(unsigned long ip) { } inline void print_irqtrace_events(struct task_struct *curr) { } /* * We are only interested in hardirq on/off events: */ void trace_hardirqs_on(void) { if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL(trace_hardirqs_on); void trace_hardirqs_off(void) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL(trace_hardirqs_off); void trace_hardirqs_on_caller(unsigned long caller_addr) { if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, caller_addr); } EXPORT_SYMBOL(trace_hardirqs_on_caller); void trace_hardirqs_off_caller(unsigned long caller_addr) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, caller_addr); } EXPORT_SYMBOL(trace_hardirqs_off_caller); #endif /* CONFIG_PROVE_LOCKING */ #endif /* CONFIG_IRQSOFF_TRACER */ #ifdef CONFIG_PREEMPT_TRACER void trace_preempt_on(unsigned long a0, unsigned long a1) { if (preempt_trace()) stop_critical_timing(a0, a1); } void trace_preempt_off(unsigned long a0, unsigned 
long a1) { if (preempt_trace()) start_critical_timing(a0, a1); } #endif /* CONFIG_PREEMPT_TRACER */ static int start_irqsoff_tracer(struct trace_array *tr, int graph) { int ret = 0; if (!graph) ret = register_ftrace_function(&trace_ops); else ret = register_ftrace_graph(&irqsoff_graph_return, &irqsoff_graph_entry); if (!ret && tracing_is_enabled()) tracer_enabled = 1; else tracer_enabled = 0; return ret; } static void stop_irqsoff_tracer(struct trace_array *tr, int graph) { tracer_enabled = 0; if (!graph) unregister_ftrace_function(&trace_ops); else unregister_ftrace_graph(); } static void __irqsoff_tracer_init(struct trace_array *tr) { save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; trace_flags |= TRACE_ITER_LATENCY_FMT; tracing_max_latency = 0; irqsoff_trace = tr; /* make sure that the tracer is visible */ smp_wmb(); tracing_reset_online_cpus(tr); if (start_irqsoff_tracer(tr, is_graph())) printk(KERN_ERR "failed to start irqsoff tracer\n"); } static void irqsoff_tracer_reset(struct trace_array *tr) { stop_irqsoff_tracer(tr, is_graph()); if (!save_lat_flag) trace_flags &= ~TRACE_ITER_LATENCY_FMT; } static void irqsoff_tracer_start(struct trace_array *tr) { tracer_enabled = 1; } static void irqsoff_tracer_stop(struct trace_array *tr) { tracer_enabled = 0; } #ifdef CONFIG_IRQSOFF_TRACER static int irqsoff_tracer_init(struct trace_array *tr) { trace_type = TRACER_IRQS_OFF; __irqsoff_tracer_init(tr); return 0; } static struct tracer irqsoff_tracer __read_mostly = { .name = "irqsoff", .init = irqsoff_tracer_init, .reset = irqsoff_tracer_reset, .start = irqsoff_tracer_start, .stop = irqsoff_tracer_stop, .print_max = 1, .print_header = irqsoff_print_header, .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_irqsoff, #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, .use_max_tr = 1, }; # define register_irqsoff(trace) register_tracer(&trace) #else # 
define register_irqsoff(trace) do { } while (0) #endif #ifdef CONFIG_PREEMPT_TRACER static int preemptoff_tracer_init(struct trace_array *tr) { trace_type = TRACER_PREEMPT_OFF; __irqsoff_tracer_init(tr); return 0; } static struct tracer preemptoff_tracer __read_mostly = { .name = "preemptoff", .init = preemptoff_tracer_init, .reset = irqsoff_tracer_reset, .start = irqsoff_tracer_start, .stop = irqsoff_tracer_stop, .print_max = 1, .print_header = irqsoff_print_header, .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptoff, #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, .use_max_tr = 1, }; # define register_preemptoff(trace) register_tracer(&trace) #else # define register_preemptoff(trace) do { } while (0) #endif #if defined(CONFIG_IRQSOFF_TRACER) && \ defined(CONFIG_PREEMPT_TRACER) static int preemptirqsoff_tracer_init(struct trace_array *tr) { trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; __irqsoff_tracer_init(tr); return 0; } static struct tracer preemptirqsoff_tracer __read_mostly = { .name = "preemptirqsoff", .init = preemptirqsoff_tracer_init, .reset = irqsoff_tracer_reset, .start = irqsoff_tracer_start, .stop = irqsoff_tracer_stop, .print_max = 1, .print_header = irqsoff_print_header, .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptirqsoff, #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, .use_max_tr = 1, }; # define register_preemptirqsoff(trace) register_tracer(&trace) #else # define register_preemptirqsoff(trace) do { } while (0) #endif __init static int init_irqsoff_tracer(void) { register_irqsoff(irqsoff_tracer); register_preemptoff(preemptoff_tracer); register_preemptirqsoff(preemptirqsoff_tracer); return 0; } device_initcall(init_irqsoff_tracer);
gpl-2.0
TripNRaVeR/tripndroid-m7-unleashed-3.4
net/netfilter/xt_ecn.c
383
4450
/* * Xtables module for matching the value of the IPv4/IPv6 and TCP ECN bits * * (C) 2002 by Harald Welte <laforge@gnumonks.org> * (C) 2011 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/in.h> #include <linux/ip.h> #include <net/ip.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/tcp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_ecn.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv6/ip6_tables.h> MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("Xtables: Explicit Congestion Notification (ECN) flag match"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_ecn"); MODULE_ALIAS("ip6t_ecn"); static bool match_tcp(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_ecn_info *einfo = par->matchinfo; struct tcphdr _tcph; const struct tcphdr *th; th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph); if (th == NULL) return false; if (einfo->operation & XT_ECN_OP_MATCH_ECE) { if (einfo->invert & XT_ECN_OP_MATCH_ECE) { if (th->ece == 1) return false; } else { if (th->ece == 0) return false; } } if (einfo->operation & XT_ECN_OP_MATCH_CWR) { if (einfo->invert & XT_ECN_OP_MATCH_CWR) { if (th->cwr == 1) return false; } else { if (th->cwr == 0) return false; } } return true; } static inline bool match_ip(const struct sk_buff *skb, const struct xt_ecn_info *einfo) { return ((ip_hdr(skb)->tos & XT_ECN_IP_MASK) == einfo->ip_ect) ^ !!(einfo->invert & XT_ECN_OP_MATCH_IP); } static bool ecn_mt4(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_ecn_info *info = par->matchinfo; if (info->operation & XT_ECN_OP_MATCH_IP && !match_ip(skb, info)) return false; if (info->operation & (XT_ECN_OP_MATCH_ECE | 
XT_ECN_OP_MATCH_CWR) && !match_tcp(skb, par)) return false; return true; } static int ecn_mt_check4(const struct xt_mtchk_param *par) { const struct xt_ecn_info *info = par->matchinfo; const struct ipt_ip *ip = par->entryinfo; if (info->operation & XT_ECN_OP_MATCH_MASK) return -EINVAL; if (info->invert & XT_ECN_OP_MATCH_MASK) return -EINVAL; if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) { pr_info("cannot match TCP bits in rule for non-tcp packets\n"); return -EINVAL; } return 0; } static inline bool match_ipv6(const struct sk_buff *skb, const struct xt_ecn_info *einfo) { return (((ipv6_hdr(skb)->flow_lbl[0] >> 4) & XT_ECN_IP_MASK) == einfo->ip_ect) ^ !!(einfo->invert & XT_ECN_OP_MATCH_IP); } static bool ecn_mt6(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_ecn_info *info = par->matchinfo; if (info->operation & XT_ECN_OP_MATCH_IP && !match_ipv6(skb, info)) return false; if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && !match_tcp(skb, par)) return false; return true; } static int ecn_mt_check6(const struct xt_mtchk_param *par) { const struct xt_ecn_info *info = par->matchinfo; const struct ip6t_ip6 *ip = par->entryinfo; if (info->operation & XT_ECN_OP_MATCH_MASK) return -EINVAL; if (info->invert & XT_ECN_OP_MATCH_MASK) return -EINVAL; if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) && (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) { pr_info("cannot match TCP bits in rule for non-tcp packets\n"); return -EINVAL; } return 0; } static struct xt_match ecn_mt_reg[] __read_mostly = { { .name = "ecn", .family = NFPROTO_IPV4, .match = ecn_mt4, .matchsize = sizeof(struct xt_ecn_info), .checkentry = ecn_mt_check4, .me = THIS_MODULE, }, { .name = "ecn", .family = NFPROTO_IPV6, .match = ecn_mt6, .matchsize = sizeof(struct xt_ecn_info), .checkentry = ecn_mt_check6, .me = THIS_MODULE, }, }; static int __init 
ecn_mt_init(void) { return xt_register_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg)); } static void __exit ecn_mt_exit(void) { xt_unregister_matches(ecn_mt_reg, ARRAY_SIZE(ecn_mt_reg)); } module_init(ecn_mt_init); module_exit(ecn_mt_exit);
gpl-2.0
flar2/m7
net/ipv4/netfilter/nf_nat_proto_tcp.c
383
2385
/* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/init.h> #include <linux/export.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/netfilter.h> #include <linux/netfilter/nfnetlink_conntrack.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_rule.h> #include <net/netfilter/nf_nat_protocol.h> #include <net/netfilter/nf_nat_core.h> static u_int16_t tcp_port_rover; static void tcp_unique_tuple(struct nf_conntrack_tuple *tuple, const struct nf_nat_ipv4_range *range, enum nf_nat_manip_type maniptype, const struct nf_conn *ct) { nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &tcp_port_rover); } static bool tcp_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); struct tcphdr *hdr; unsigned int hdroff = iphdroff + iph->ihl*4; __be32 oldip, newip; __be16 *portptr, newport, oldport; int hdrsize = 8; if (skb->len >= hdroff + sizeof(struct tcphdr)) hdrsize = sizeof(struct tcphdr); if (!skb_make_writable(skb, hdroff + hdrsize)) return false; iph = (struct iphdr *)(skb->data + iphdroff); hdr = (struct tcphdr *)(skb->data + hdroff); if (maniptype == NF_NAT_MANIP_SRC) { oldip = iph->saddr; newip = tuple->src.u3.ip; newport = tuple->src.u.tcp.port; portptr = &hdr->source; } else { oldip = iph->daddr; newip = tuple->dst.u3.ip; newport = tuple->dst.u.tcp.port; portptr = &hdr->dest; } oldport = *portptr; *portptr = newport; if (hdrsize < sizeof(*hdr)) return true; inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1); inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0); return true; } const struct nf_nat_protocol 
nf_nat_protocol_tcp = { .protonum = IPPROTO_TCP, .manip_pkt = tcp_manip_pkt, .in_range = nf_nat_proto_in_range, .unique_tuple = tcp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) .nlattr_to_range = nf_nat_proto_nlattr_to_range, #endif };
gpl-2.0
alcobar/asuswrt-merlin
release/src-rt-7.x.main/src/linux/linux-2.6.36/arch/mips/kernel/i8259.c
895
9382
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Code to handle x86 style IRQs plus some generic interrupt stuff. * * Copyright (C) 1992 Linus Torvalds * Copyright (C) 1994 - 2000 Ralf Baechle */ #include <linux/delay.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/sysdev.h> #include <asm/i8259.h> #include <asm/io.h> /* * This is the 'legacy' 8259A Programmable Interrupt Controller, * present in the majority of PC/AT boxes. * plus some generic x86 specific things if generic specifics makes * any sense at all. * this file should become arch/i386/kernel/irq.c when the old irq.c * moves to arch independent land */ static int i8259A_auto_eoi = -1; DEFINE_RAW_SPINLOCK(i8259A_lock); static void disable_8259A_irq(unsigned int irq); static void enable_8259A_irq(unsigned int irq); static void mask_and_ack_8259A(unsigned int irq); static void init_8259A(int auto_eoi); static struct irq_chip i8259A_chip = { .name = "XT-PIC", .mask = disable_8259A_irq, .disable = disable_8259A_irq, .unmask = enable_8259A_irq, .mask_ack = mask_and_ack_8259A, #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF .set_affinity = plat_set_irq_affinity, #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ }; /* * 8259A PIC functions to handle ISA devices: */ /* * This contains the irq mask for both 8259A irq controllers, */ static unsigned int cached_irq_mask = 0xffff; #define cached_master_mask (cached_irq_mask) #define cached_slave_mask (cached_irq_mask >> 8) static void disable_8259A_irq(unsigned int irq) { unsigned int mask; unsigned long flags; irq -= I8259A_IRQ_BASE; mask = 1 << irq; raw_spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask |= mask; if (irq & 8) outb(cached_slave_mask, PIC_SLAVE_IMR); else outb(cached_master_mask, PIC_MASTER_IMR); raw_spin_unlock_irqrestore(&i8259A_lock, flags); 
} static void enable_8259A_irq(unsigned int irq) { unsigned int mask; unsigned long flags; irq -= I8259A_IRQ_BASE; mask = ~(1 << irq); raw_spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask &= mask; if (irq & 8) outb(cached_slave_mask, PIC_SLAVE_IMR); else outb(cached_master_mask, PIC_MASTER_IMR); raw_spin_unlock_irqrestore(&i8259A_lock, flags); } int i8259A_irq_pending(unsigned int irq) { unsigned int mask; unsigned long flags; int ret; irq -= I8259A_IRQ_BASE; mask = 1 << irq; raw_spin_lock_irqsave(&i8259A_lock, flags); if (irq < 8) ret = inb(PIC_MASTER_CMD) & mask; else ret = inb(PIC_SLAVE_CMD) & (mask >> 8); raw_spin_unlock_irqrestore(&i8259A_lock, flags); return ret; } void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq); enable_irq(irq); } /* * This function assumes to be called rarely. Switching between * 8259A registers is slow. * This has to be protected by the irq controller spinlock * before being called. */ static inline int i8259A_irq_real(unsigned int irq) { int value; int irqmask = 1 << irq; if (irq < 8) { outb(0x0B, PIC_MASTER_CMD); /* ISR register */ value = inb(PIC_MASTER_CMD) & irqmask; outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */ return value; } outb(0x0B, PIC_SLAVE_CMD); /* ISR register */ value = inb(PIC_SLAVE_CMD) & (irqmask >> 8); outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */ return value; } /* * Careful! The 8259A is a fragile beast, it pretty * much _has_ to be done exactly like this (mask it * first, _then_ send the EOI, and the order of EOI * to the two 8259s is important! */ static void mask_and_ack_8259A(unsigned int irq) { unsigned int irqmask; unsigned long flags; irq -= I8259A_IRQ_BASE; irqmask = 1 << irq; raw_spin_lock_irqsave(&i8259A_lock, flags); /* * Lightweight spurious IRQ detection. 
We do not want * to overdo spurious IRQ handling - it's usually a sign * of hardware problems, so we only do the checks we can * do without slowing down good hardware unnecessarily. * * Note that IRQ7 and IRQ15 (the two spurious IRQs * usually resulting from the 8259A-1|2 PICs) occur * even if the IRQ is masked in the 8259A. Thus we * can check spurious 8259A IRQs without doing the * quite slow i8259A_irq_real() call for every IRQ. * This does not cover 100% of spurious interrupts, * but should be enough to warn the user that there * is something bad going on ... */ if (cached_irq_mask & irqmask) goto spurious_8259A_irq; cached_irq_mask |= irqmask; handle_real_irq: if (irq & 8) { inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */ outb(cached_slave_mask, PIC_SLAVE_IMR); outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */ outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */ } else { inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */ outb(cached_master_mask, PIC_MASTER_IMR); outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ } smtc_im_ack_irq(irq); raw_spin_unlock_irqrestore(&i8259A_lock, flags); return; spurious_8259A_irq: /* * this is the slow path - should happen rarely. */ if (i8259A_irq_real(irq)) /* * oops, the IRQ _is_ in service according to the * 8259A - not spurious, go handle it. */ goto handle_real_irq; { static int spurious_irq_mask; /* * At this point we can be sure the IRQ is spurious, * lets ACK and report it. [once per IRQ] */ if (!(spurious_irq_mask & irqmask)) { printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } atomic_inc(&irq_err_count); /* * Theoretically we do not have to handle this IRQ, * but in Linux this does not cause problems and is * simpler for us. 
*/ goto handle_real_irq; } } static int i8259A_resume(struct sys_device *dev) { if (i8259A_auto_eoi >= 0) init_8259A(i8259A_auto_eoi); return 0; } static int i8259A_shutdown(struct sys_device *dev) { /* Put the i8259A into a quiescent state that * the kernel initialization code can get it * out of. */ if (i8259A_auto_eoi >= 0) { outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ } return 0; } static struct sysdev_class i8259_sysdev_class = { .name = "i8259", .resume = i8259A_resume, .shutdown = i8259A_shutdown, }; static struct sys_device device_i8259A = { .id = 0, .cls = &i8259_sysdev_class, }; static int __init i8259A_init_sysfs(void) { int error = sysdev_class_register(&i8259_sysdev_class); if (!error) error = sysdev_register(&device_i8259A); return error; } device_initcall(i8259A_init_sysfs); static void init_8259A(int auto_eoi) { unsigned long flags; i8259A_auto_eoi = auto_eoi; raw_spin_lock_irqsave(&i8259A_lock, flags); outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ /* * outb_p - this has to work on a wide range of PC hardware. 
*/ outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */ outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */ if (auto_eoi) /* master does Auto EOI */ outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); else /* master expects normal EOI */ outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */ outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */ outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */ if (auto_eoi) /* * In AEOI mode we just have to mask the interrupt * when acking. */ i8259A_chip.mask_ack = disable_8259A_irq; else i8259A_chip.mask_ack = mask_and_ack_8259A; udelay(100); /* wait for 8259A to initialize */ outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ raw_spin_unlock_irqrestore(&i8259A_lock, flags); } /* * IRQ2 is cascade interrupt to second interrupt controller */ static struct irqaction irq2 = { .handler = no_action, .name = "cascade", }; static struct resource pic1_io_resource = { .name = "pic1", .start = PIC_MASTER_CMD, .end = PIC_MASTER_IMR, .flags = IORESOURCE_BUSY }; static struct resource pic2_io_resource = { .name = "pic2", .start = PIC_SLAVE_CMD, .end = PIC_SLAVE_IMR, .flags = IORESOURCE_BUSY }; /* * On systems with i8259-style interrupt controllers we assume for * driver compatibility reasons interrupts 0 - 15 to be the i8259 * interrupts even if the hardware uses a different interrupt numbering. 
*/ void __init init_i8259_irqs(void) { int i; insert_resource(&ioport_resource, &pic1_io_resource); insert_resource(&ioport_resource, &pic2_io_resource); init_8259A(0); for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); set_irq_probe(i); } setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); }
gpl-2.0
iamroot12C/linux
drivers/power/reset/imx-snvs-poweroff.c
1407
1708
/* Power off driver for i.mx6 * Copyright (c) 2014, FREESCALE CORPORATION. All rights reserved. * * based on msm-poweroff.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> static void __iomem *snvs_base; static void do_imx_poweroff(void) { u32 value = readl(snvs_base); /* set TOP and DP_EN bit */ writel(value | 0x60, snvs_base); } static int imx_poweroff_probe(struct platform_device *pdev) { snvs_base = of_iomap(pdev->dev.of_node, 0); if (!snvs_base) { dev_err(&pdev->dev, "failed to get memory\n"); return -ENODEV; } pm_power_off = do_imx_poweroff; return 0; } static const struct of_device_id of_imx_poweroff_match[] = { { .compatible = "fsl,sec-v4.0-poweroff", }, {}, }; MODULE_DEVICE_TABLE(of, of_imx_poweroff_match); static struct platform_driver imx_poweroff_driver = { .probe = imx_poweroff_probe, .driver = { .name = "imx-snvs-poweroff", .of_match_table = of_match_ptr(of_imx_poweroff_match), }, }; static int __init imx_poweroff_init(void) { return platform_driver_register(&imx_poweroff_driver); } device_initcall(imx_poweroff_init);
gpl-2.0
allanm84/linux-imx
drivers/ata/pata_ali.c
1919
17908
/* * pata_ali.c - ALI 15x3 PATA for new ATA layer * (C) 2005 Red Hat Inc * * based in part upon * linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02 * * Copyright (C) 1998-2000 Michel Aubry, Maintainer * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer * Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer * * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org) * May be copied or modified under the terms of the GNU General Public License * Copyright (C) 2002 Alan Cox <alan@redhat.com> * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw> * * Documentation * Chipset documentation available under NDA only * * TODO/CHECK * Cannot have ATAPI on both master & slave for rev < c2 (???) but * otherwise should do atapi DMA (For now for old we do PIO only for * ATAPI) * Review Sunblade workaround. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/dmi.h> #define DRV_NAME "pata_ali" #define DRV_VERSION "0.7.8" static int ali_atapi_dma = 0; module_param_named(atapi_dma, ali_atapi_dma, int, 0644); MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)"); static struct pci_dev *ali_isa_bridge; /* * Cable special cases */ static const struct dmi_system_id cable_dmi_table[] = { { .ident = "HP Pavilion N5430", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"), }, }, { .ident = "Toshiba Satellite S1800-814", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"), }, }, { } }; static int ali_cable_override(struct pci_dev *pdev) { /* Fujitsu P2000 */ if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF) return 1; /* Mitac 8317 (Winbook-A) and relatives */ if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317) return 1; /* 
Systems by DMI */ if (dmi_check_system(cable_dmi_table)) return 1; return 0; } /** * ali_c2_cable_detect - cable detection * @ap: ATA port * * Perform cable detection for C2 and later revisions */ static int ali_c2_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 ata66; /* Certain laptops use short but suitable cables and don't implement the detect logic */ if (ali_cable_override(pdev)) return ATA_CBL_PATA40_SHORT; /* Host view cable detect 0x4A bit 0 primary bit 1 secondary Bit set for 40 pin */ pci_read_config_byte(pdev, 0x4A, &ata66); if (ata66 & (1 << ap->port_no)) return ATA_CBL_PATA40; else return ATA_CBL_PATA80; } /** * ali_20_filter - filter for earlier ALI DMA * @ap: ALi ATA port * @adev: attached device * * Ensure that we do not do DMA on CD devices. We may be able to * fix that later on. Also ensure we do not do UDMA on WDC drives */ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask) { char model_num[ATA_ID_PROD_LEN + 1]; /* No DMA on anything but a disk for now */ if (adev->class != ATA_DEV_ATA) mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num)); if (strstr(model_num, "WDC")) return mask &= ~ATA_MASK_UDMA; return mask; } /** * ali_fifo_control - FIFO manager * @ap: ALi channel to control * @adev: device for FIFO control * @on: 0 for off 1 for on * * Enable or disable the FIFO on a given device. Because of the way the * ALi FIFO works it provides a boost on ATA disk but can be confused by * ATAPI and we must therefore manage it. */ static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); int pio_fifo = 0x54 + ap->port_no; u8 fifo; int shift = 4 * adev->devno; /* ATA - FIFO on set nibble to 0x05, ATAPI - FIFO off, set nibble to 0x00. 
Not all the docs agree but the behaviour we now use is the one stated in the BIOS Programming Guide */ pci_read_config_byte(pdev, pio_fifo, &fifo); fifo &= ~(0x0F << shift); fifo |= (on << shift); pci_write_config_byte(pdev, pio_fifo, fifo); } /** * ali_program_modes - load mode registers * @ap: ALi channel to load * @adev: Device the timing is for * @t: timing data * @ultra: UDMA timing or zero for off * * Loads the timing registers for cmd/data and disable UDMA if * ultra is zero. If ultra is set then load and enable the UDMA * timing but do not touch the command/data timing. */ static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); int cas = 0x58 + 4 * ap->port_no; /* Command timing */ int cbt = 0x59 + 4 * ap->port_no; /* Command timing */ int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */ int udmat = 0x56 + ap->port_no; /* UDMA timing */ int shift = 4 * adev->devno; u8 udma; if (t != NULL) { t->setup = clamp_val(t->setup, 1, 8) & 7; t->act8b = clamp_val(t->act8b, 1, 8) & 7; t->rec8b = clamp_val(t->rec8b, 1, 16) & 15; t->active = clamp_val(t->active, 1, 8) & 7; t->recover = clamp_val(t->recover, 1, 16) & 15; pci_write_config_byte(pdev, cas, t->setup); pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b); pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover); } /* Set up the UDMA enable */ pci_read_config_byte(pdev, udmat, &udma); udma &= ~(0x0F << shift); udma |= ultra << shift; pci_write_config_byte(pdev, udmat, udma); } /** * ali_set_piomode - set initial PIO mode data * @ap: ATA interface * @adev: ATA device * * Program the ALi registers for PIO mode. 
*/ static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev) { struct ata_device *pair = ata_dev_pair(adev); struct ata_timing t; unsigned long T = 1000000000 / 33333; /* PCI clock based */ ata_timing_compute(adev, adev->pio_mode, &t, T, 1); if (pair) { struct ata_timing p; ata_timing_compute(pair, pair->pio_mode, &p, T, 1); ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT); if (pair->dma_mode) { ata_timing_compute(pair, pair->dma_mode, &p, T, 1); ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT); } } /* PIO FIFO is only permitted on ATA disk */ if (adev->class != ATA_DEV_ATA) ali_fifo_control(ap, adev, 0x00); ali_program_modes(ap, adev, &t, 0); if (adev->class == ATA_DEV_ATA) ali_fifo_control(ap, adev, 0x05); } /** * ali_set_dmamode - set initial DMA mode data * @ap: ATA interface * @adev: ATA device * * Program the ALi registers for DMA mode. */ static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev) { static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD }; struct ata_device *pair = ata_dev_pair(adev); struct ata_timing t; unsigned long T = 1000000000 / 33333; /* PCI clock based */ struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (adev->class == ATA_DEV_ATA) ali_fifo_control(ap, adev, 0x08); if (adev->dma_mode >= XFER_UDMA_0) { ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]); if (adev->dma_mode >= XFER_UDMA_3) { u8 reg4b; pci_read_config_byte(pdev, 0x4B, &reg4b); reg4b |= 1; pci_write_config_byte(pdev, 0x4B, reg4b); } } else { ata_timing_compute(adev, adev->dma_mode, &t, T, 1); if (pair) { struct ata_timing p; ata_timing_compute(pair, pair->pio_mode, &p, T, 1); ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT); if (pair->dma_mode) { ata_timing_compute(pair, pair->dma_mode, &p, T, 1); ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT); } } ali_program_modes(ap, adev, &t, 0); } } /** * ali_warn_atapi_dma - Warn about ATAPI DMA disablement 
* @adev: Device * * Whine about ATAPI DMA disablement if @adev is an ATAPI device. * Can be used as ->dev_config. */ static void ali_warn_atapi_dma(struct ata_device *adev) { struct ata_eh_context *ehc = &adev->link->eh_context; int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) { ata_dev_warn(adev, "WARNING: ATAPI DMA disabled for reliability issues. It can be enabled\n"); ata_dev_warn(adev, "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n"); } } /** * ali_lock_sectors - Keep older devices to 255 sector mode * @adev: Device * * Called during the bus probe for each device that is found. We use * this call to lock the sector count of the device to 255 or less on * older ALi controllers. If we didn't do this then large I/O's would * require LBA48 commands which the older ALi requires are issued by * slower PIO methods */ static void ali_lock_sectors(struct ata_device *adev) { adev->max_sectors = 255; ali_warn_atapi_dma(adev); } /** * ali_check_atapi_dma - DMA check for most ALi controllers * @adev: Device * * Called to decide whether commands should be sent by DMA or PIO */ static int ali_check_atapi_dma(struct ata_queued_cmd *qc) { if (!ali_atapi_dma) { /* FIXME: pata_ali can't do ATAPI DMA reliably but the * IDE alim15x3 driver can. I tried lots of things * but couldn't find what the actual difference was. * If you got an idea, please write it to * linux-ide@vger.kernel.org and cc htejun@gmail.com. * * Disable ATAPI DMA for now. 
*/ return -EOPNOTSUPP; } /* If its not a media command, its not worth it */ if (atapi_cmd_type(qc->cdb[0]) == ATAPI_MISC) return -EOPNOTSUPP; return 0; } static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes) { u8 r; int port_bit = 4 << link->ap->port_no; /* If our bridge is an ALI 1533 then do the extra work */ if (ali_isa_bridge) { /* Tristate and re-enable the bus signals */ pci_read_config_byte(ali_isa_bridge, 0x58, &r); r &= ~port_bit; pci_write_config_byte(ali_isa_bridge, 0x58, r); r |= port_bit; pci_write_config_byte(ali_isa_bridge, 0x58, r); } ata_sff_postreset(link, classes); } static struct scsi_host_template ali_sht = { ATA_BMDMA_SHT(DRV_NAME), }; /* * Port operations for PIO only ALi */ static struct ata_port_operations ali_early_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = ali_set_piomode, .sff_data_xfer = ata_sff_data_xfer32, }; static const struct ata_port_operations ali_dma_base_ops = { .inherits = &ata_bmdma32_port_ops, .set_piomode = ali_set_piomode, .set_dmamode = ali_set_dmamode, }; /* * Port operations for DMA capable ALi without cable * detect */ static struct ata_port_operations ali_20_port_ops = { .inherits = &ali_dma_base_ops, .cable_detect = ata_cable_40wire, .mode_filter = ali_20_filter, .check_atapi_dma = ali_check_atapi_dma, .dev_config = ali_lock_sectors, }; /* * Port operations for DMA capable ALi with cable detect */ static struct ata_port_operations ali_c2_port_ops = { .inherits = &ali_dma_base_ops, .check_atapi_dma = ali_check_atapi_dma, .cable_detect = ali_c2_cable_detect, .dev_config = ali_lock_sectors, .postreset = ali_c2_c3_postreset, }; /* * Port operations for DMA capable ALi with cable detect */ static struct ata_port_operations ali_c4_port_ops = { .inherits = &ali_dma_base_ops, .check_atapi_dma = ali_check_atapi_dma, .cable_detect = ali_c2_cable_detect, .dev_config = ali_lock_sectors, }; /* * Port operations for DMA capable ALi with cable detect and 
LBA48 */ static struct ata_port_operations ali_c5_port_ops = { .inherits = &ali_dma_base_ops, .check_atapi_dma = ali_check_atapi_dma, .dev_config = ali_warn_atapi_dma, .cable_detect = ali_c2_cable_detect, }; /** * ali_init_chipset - chip setup function * @pdev: PCI device of ATA controller * * Perform the setup on the device that must be done both at boot * and at resume time. */ static void ali_init_chipset(struct pci_dev *pdev) { u8 tmp; struct pci_dev *north; /* * The chipset revision selects the driver operations and * mode data. */ if (pdev->revision <= 0x20) { pci_read_config_byte(pdev, 0x53, &tmp); tmp |= 0x03; pci_write_config_byte(pdev, 0x53, tmp); } else { pci_read_config_byte(pdev, 0x4a, &tmp); pci_write_config_byte(pdev, 0x4a, tmp | 0x20); pci_read_config_byte(pdev, 0x4B, &tmp); if (pdev->revision < 0xC2) /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */ /* Clear CD-ROM DMA write bit */ tmp &= 0x7F; /* Cable and UDMA */ if (pdev->revision >= 0xc2) tmp |= 0x01; pci_write_config_byte(pdev, 0x4B, tmp | 0x08); /* * CD_ROM DMA on (0x53 bit 0). Enable this even if we want * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control * via 0x54/55. */ pci_read_config_byte(pdev, 0x53, &tmp); if (pdev->revision >= 0xc7) tmp |= 0x03; else tmp |= 0x01; /* CD_ROM enable for DMA */ pci_write_config_byte(pdev, 0x53, tmp); } north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) { /* Configure the ALi bridge logic. For non ALi rely on BIOS. Set the south bridge enable bit */ pci_read_config_byte(ali_isa_bridge, 0x79, &tmp); if (pdev->revision == 0xC2) pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x04); else if (pdev->revision > 0xC2 && pdev->revision < 0xC5) pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x02); } pci_dev_put(north); ata_pci_bmdma_clear_simplex(pdev); } /** * ali_init_one - discovery callback * @pdev: PCI device ID * @id: PCI table info * * An ALi IDE interface has been discovered. 
Figure out what revision * and perform configuration work before handing it to the ATA layer */ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { static const struct ata_port_info info_early = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO4, .port_ops = &ali_early_port_ops }; /* Revision 0x20 added DMA */ static const struct ata_port_info info_20 = { .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .port_ops = &ali_20_port_ops }; /* Revision 0x20 with support logic added UDMA */ static const struct ata_port_info info_20_udma = { .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, .port_ops = &ali_20_port_ops }; /* Revision 0xC2 adds UDMA66 */ static const struct ata_port_info info_c2 = { .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &ali_c2_port_ops }; /* Revision 0xC3 is UDMA66 for now */ static const struct ata_port_info info_c3 = { .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, .port_ops = &ali_c2_port_ops }; /* Revision 0xC4 is UDMA100 */ static const struct ata_port_info info_c4 = { .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_LBA48 | ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, .port_ops = &ali_c4_port_ops }; /* Revision 0xC5 is UDMA133 with LBA48 DMA */ static const struct ata_port_info info_c5 = { .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_IGN_SIMPLEX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &ali_c5_port_ops }; const struct ata_port_info *ppi[] = { NULL, NULL }; u8 tmp; int rc; rc = pcim_enable_device(pdev); if (rc) return rc; /* * The chipset revision selects the 
driver operations and * mode data. */ if (pdev->revision < 0x20) { ppi[0] = &info_early; } else if (pdev->revision < 0xC2) { ppi[0] = &info_20; } else if (pdev->revision == 0xC2) { ppi[0] = &info_c2; } else if (pdev->revision == 0xC3) { ppi[0] = &info_c3; } else if (pdev->revision == 0xC4) { ppi[0] = &info_c4; } else ppi[0] = &info_c5; ali_init_chipset(pdev); if (ali_isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) { /* Are we paired with a UDMA capable chip */ pci_read_config_byte(ali_isa_bridge, 0x5E, &tmp); if ((tmp & 0x1E) == 0x12) ppi[0] = &info_20_udma; } if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask) return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); else return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0); } #ifdef CONFIG_PM_SLEEP static int ali_reinit_one(struct pci_dev *pdev) { struct ata_host *host = pci_get_drvdata(pdev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; ali_init_chipset(pdev); ata_host_resume(host); return 0; } #endif static const struct pci_device_id ali[] = { { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), }, { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), }, { }, }; static struct pci_driver ali_pci_driver = { .name = DRV_NAME, .id_table = ali, .probe = ali_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM_SLEEP .suspend = ata_pci_device_suspend, .resume = ali_reinit_one, #endif }; static int __init ali_init(void) { int ret; ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); ret = pci_register_driver(&ali_pci_driver); if (ret < 0) pci_dev_put(ali_isa_bridge); return ret; } static void __exit ali_exit(void) { pci_unregister_driver(&ali_pci_driver); pci_dev_put(ali_isa_bridge); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for ALi PATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ali); MODULE_VERSION(DRV_VERSION); module_init(ali_init); module_exit(ali_exit);
gpl-2.0
andr00ib/victor-oficial-kernel
net/core/sysctl_net_core.c
2431
5574
/* -*- linux-c -*- * sysctl_net_core.c: sysctl interface to net core subsystem. * * Begun April 1, 1996, Mike Shaver. * Added /proc/sys/net/core directory entry (empty =) ). [MS] */ #include <linux/mm.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/socket.h> #include <linux/netdevice.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/slab.h> #include <net/ip.h> #include <net/sock.h> #include <net/net_ratelimit.h> #ifdef CONFIG_RPS static int rps_sock_flow_sysctl(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { unsigned int orig_size, size; int ret, i; ctl_table tmp = { .data = &size, .maxlen = sizeof(size), .mode = table->mode }; struct rps_sock_flow_table *orig_sock_table, *sock_table; static DEFINE_MUTEX(sock_flow_mutex); mutex_lock(&sock_flow_mutex); orig_sock_table = rcu_dereference_protected(rps_sock_flow_table, lockdep_is_held(&sock_flow_mutex)); size = orig_size = orig_sock_table ? 
orig_sock_table->mask + 1 : 0; ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write) { if (size) { if (size > 1<<30) { /* Enforce limit to prevent overflow */ mutex_unlock(&sock_flow_mutex); return -EINVAL; } size = roundup_pow_of_two(size); if (size != orig_size) { sock_table = vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size)); if (!sock_table) { mutex_unlock(&sock_flow_mutex); return -ENOMEM; } sock_table->mask = size - 1; } else sock_table = orig_sock_table; for (i = 0; i < size; i++) sock_table->ents[i] = RPS_NO_CPU; } else sock_table = NULL; if (sock_table != orig_sock_table) { rcu_assign_pointer(rps_sock_flow_table, sock_table); synchronize_rcu(); vfree(orig_sock_table); } } mutex_unlock(&sock_flow_mutex); return ret; } #endif /* CONFIG_RPS */ static struct ctl_table net_core_table[] = { #ifdef CONFIG_NET { .procname = "wmem_max", .data = &sysctl_wmem_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "rmem_max", .data = &sysctl_rmem_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "wmem_default", .data = &sysctl_wmem_default, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "rmem_default", .data = &sysctl_rmem_default, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "dev_weight", .data = &weight_p, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "netdev_max_backlog", .data = &netdev_max_backlog, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #ifdef CONFIG_BPF_JIT { .procname = "bpf_jit_enable", .data = &bpf_jit_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif { .procname = "netdev_tstamp_prequeue", .data = &netdev_tstamp_prequeue, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "message_cost", .data = &net_ratelimit_state.interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler 
= proc_dointvec_jiffies, }, { .procname = "message_burst", .data = &net_ratelimit_state.burst, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "optmem_max", .data = &sysctl_optmem_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #ifdef CONFIG_RPS { .procname = "rps_sock_flow_entries", .maxlen = sizeof(int), .mode = 0644, .proc_handler = rps_sock_flow_sysctl }, #endif #endif /* CONFIG_NET */ { .procname = "netdev_budget", .data = &netdev_budget, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "warnings", .data = &net_msg_warn, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { } }; static struct ctl_table netns_core_table[] = { { .procname = "somaxconn", .data = &init_net.core.sysctl_somaxconn, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { } }; __net_initdata struct ctl_path net_core_path[] = { { .procname = "net", }, { .procname = "core", }, { }, }; static __net_init int sysctl_core_net_init(struct net *net) { struct ctl_table *tbl; net->core.sysctl_somaxconn = SOMAXCONN; tbl = netns_core_table; if (!net_eq(net, &init_net)) { tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); if (tbl == NULL) goto err_dup; tbl[0].data = &net->core.sysctl_somaxconn; } net->core.sysctl_hdr = register_net_sysctl_table(net, net_core_path, tbl); if (net->core.sysctl_hdr == NULL) goto err_reg; return 0; err_reg: if (tbl != netns_core_table) kfree(tbl); err_dup: return -ENOMEM; } static __net_exit void sysctl_core_net_exit(struct net *net) { struct ctl_table *tbl; tbl = net->core.sysctl_hdr->ctl_table_arg; unregister_net_sysctl_table(net->core.sysctl_hdr); BUG_ON(tbl == netns_core_table); kfree(tbl); } static __net_initdata struct pernet_operations sysctl_core_ops = { .init = sysctl_core_net_init, .exit = sysctl_core_net_exit, }; static __init int sysctl_core_init(void) { static struct ctl_table empty[1]; 
register_sysctl_paths(net_core_path, empty); register_net_sysctl_rotable(net_core_path, net_core_table); return register_pernet_subsys(&sysctl_core_ops); } fs_initcall(sysctl_core_init);
gpl-2.0
ColDReaVeR/android_kernel_samsung_coriplus
drivers/video/w100fb.c
2943
49084
/* * linux/drivers/video/w100fb.c * * Frame Buffer Device for ATI Imageon w100 (Wallaby) * * Copyright (C) 2002, ATI Corp. * Copyright (C) 2004-2006 Richard Purdie * Copyright (c) 2005 Ian Molton * Copyright (c) 2006 Alberto Mardegan * * Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net> * * Generic platform support by Ian Molton <spyro@f2s.com> * and Richard Purdie <rpurdie@rpsys.net> * * w32xx support by Ian Molton * * Hardware acceleration support by Alberto Mardegan * <mardy@users.sourceforge.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <asm/io.h> #include <asm/uaccess.h> #include <video/w100fb.h> #include "w100fb.h" /* * Prototypes */ static void w100_suspend(u32 mode); static void w100_vsync(void); static void w100_hw_init(struct w100fb_par*); static void w100_pwm_setup(struct w100fb_par*); static void w100_init_clocks(struct w100fb_par*); static void w100_setup_memory(struct w100fb_par*); static void w100_init_lcd(struct w100fb_par*); static void w100_set_dispregs(struct w100fb_par*); static void w100_update_enable(void); static void w100_update_disable(void); static void calc_hsync(struct w100fb_par *par); static void w100_init_graphic_engine(struct w100fb_par *par); struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit; /* Pseudo palette size */ #define MAX_PALETTES 16 #define W100_SUSPEND_EXTMEM 0 #define W100_SUSPEND_ALL 1 #define BITS_PER_PIXEL 16 /* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */ static void *remapped_base; static void *remapped_regs; static void *remapped_fbuf; #define REMAPPED_FB_LEN 0x15ffff /* 
   This is the offset in the w100's address space we map the current
   framebuffer memory to. We use the position of external memory as
   we can remap internal memory to there if external isn't present. */
#define W100_FB_BASE MEM_EXT_BASE_VALUE


/*
 * Sysfs functions
 */

/* Report the current display flip state ("0" or "1") to userspace. */
static ssize_t flip_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	return sprintf(buf, "%d\n",par->flip);
}

/*
 * Set the display flip state: any value > 0 enables flipping, 0 disables.
 * The display registers are reprogrammed and hsync recalculated so the
 * change takes effect immediately.
 */
static ssize_t flip_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned int flip;
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	flip = simple_strtoul(buf, NULL, 10);

	if (flip > 0)
		par->flip = 1;
	else
		par->flip = 0;

	/* Apply the new orientation with double-buffered updates disabled */
	w100_update_disable();
	w100_set_dispregs(par);
	w100_update_enable();

	calc_hsync(par);

	return count;
}

static DEVICE_ATTR(flip, 0644, flip_show, flip_store);

/*
 * Debug helper. NOTE: despite the name this is a sysfs *store* hook -
 * writing a hex register offset reads that register and logs its value.
 */
static ssize_t w100fb_reg_read(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long regs, param;
	regs = simple_strtoul(buf, NULL, 16);
	param = readl(remapped_regs + regs);
	printk("Read Register 0x%08lX: 0x%08lX\n", regs, param);
	return count;
}

static DEVICE_ATTR(reg_read, 0200, NULL, w100fb_reg_read);

/*
 * Debug helper: input is "<offset> <value>" in hex; the value is written
 * to the register, restricted to offsets <= 0x2000.
 */
static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long regs, param;
	sscanf(buf, "%lx %lx", &regs, &param);
	if (regs <= 0x2000) {
		printk("Write Register 0x%08lX: 0x%08lX\n", regs, param);
		writel(param, remapped_regs + regs);
	}
	return count;
}

static DEVICE_ATTR(reg_write, 0200, NULL, w100fb_reg_write);

/* Report whether the fast PLL system clock mode is selected. */
static ssize_t fastpllclk_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	return sprintf(buf, "%d\n",par->fastpll_mode);
}

/* Select fast (>0) or normal (0) system clock and reinitialise clocks. */
static ssize_t fastpllclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct 
fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	if (simple_strtoul(buf, NULL, 10) > 0) {
		par->fastpll_mode=1;
		printk("w100fb: Using fast system clock (if possible)\n");
	} else {
		par->fastpll_mode=0;
		printk("w100fb: Using normal system clock\n");
	}

	/* Changing the clock source requires a full clock reinit */
	w100_init_clocks(par);
	calc_hsync(par);

	return count;
}

static DEVICE_ATTR(fastpllclk, 0644, fastpllclk_show, fastpllclk_store);

/*
 * Some touchscreens need hsync information from the video driver to
 * function correctly. We export it here.
 */
unsigned long w100fb_get_hsynclen(struct device *dev)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par=info->par;

	/* If display is blanked/suspended, hsync isn't active */
	if (par->blanked)
		return 0;
	else
		return par->hsync_len;
}
EXPORT_SYMBOL(w100fb_get_hsynclen);

/* Zero the visible framebuffer (xres * yres pixels at 16bpp). */
static void w100fb_clear_screen(struct w100fb_par *par)
{
	memset_io(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), 0, (par->xres * par->yres * BITS_PER_PIXEL/8));
}


/*
 * Set a palette value from rgb components
 */
static int w100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			    u_int trans, struct fb_info *info)
{
	unsigned int val;
	int ret = 1;

	/*
	 * If greyscale is true, then we convert the RGB value
	 * to greyscale no matter what visual we are using.
	 */
	if (info->var.grayscale)
		red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16;

	/*
	 * 16-bit True Colour. We encode the RGB value
	 * according to the RGB bitfield information.
	 */
	if (regno < MAX_PALETTES) {
		u32 *pal = info->pseudo_palette;

		/* Pack into RGB565: 5 bits red, 6 bits green, 5 bits blue */
		val = (red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
		pal[regno] = val;
		ret = 0;
	}
	return ret;
}


/*
 * Blank the display based on value in blank_mode
 */
static int w100fb_blank(int blank_mode, struct fb_info *info)
{
	struct w100fb_par *par = info->par;
	struct w100_tg_info *tg = par->mach->tg;

	switch(blank_mode) {

	case FB_BLANK_NORMAL:         /* Normal blanking */
	case FB_BLANK_VSYNC_SUSPEND:  /* VESA blank (vsync off) */
	case FB_BLANK_HSYNC_SUSPEND:  /* VESA blank (hsync off) */
	case FB_BLANK_POWERDOWN:      /* Poweroff */
		if (par->blanked == 0) {
			/* Let the board-specific hook power down the panel */
			if(tg && tg->suspend)
				tg->suspend(par);
			par->blanked = 1;
		}
		break;

	case FB_BLANK_UNBLANK: /* Unblanking */
		if (par->blanked != 0) {
			if(tg && tg->resume)
				tg->resume(par);
			par->blanked = 0;
		}
		break;
	}
	return 0;
}


/*
 * Busy-wait (bounded) until the command FIFO has room for 'entries'
 * slots; logs an error on timeout rather than blocking forever.
 */
static void w100_fifo_wait(int entries)
{
	union rbbm_status_u status;
	int i;

	for (i = 0; i < 2000000; i++) {
		status.val = readl(remapped_regs + mmRBBM_STATUS);
		if (status.f.cmdfifo_avail >= entries)
			return;
		udelay(1);
	}
	printk(KERN_ERR "w100fb: FIFO Timeout!\n");
}


/* fbdev sync hook: wait for the graphics engine to go idle. */
static int w100fb_sync(struct fb_info *info)
{
	union rbbm_status_u status;
	int i;

	for (i = 0; i < 2000000; i++) {
		status.val = readl(remapped_regs + mmRBBM_STATUS);
		if (!status.f.gui_active)
			return 0;
		udelay(1);
	}
	printk(KERN_ERR "w100fb: Graphic engine timeout!\n");
	return -EBUSY;
}


/* Program the 2D engine defaults (pitch/offset, clipping, datatypes). */
static void w100_init_graphic_engine(struct w100fb_par *par)
{
	union dp_gui_master_cntl_u gmc;
	union dp_mix_u dp_mix;
	union dp_datatype_u dp_datatype;
	union dp_cntl_u dp_cntl;

	w100_fifo_wait(4);
	writel(W100_FB_BASE, remapped_regs + mmDST_OFFSET);
	writel(par->xres, remapped_regs + mmDST_PITCH);
	writel(W100_FB_BASE, remapped_regs + mmSRC_OFFSET);
	writel(par->xres, remapped_regs + mmSRC_PITCH);

	w100_fifo_wait(3);
	writel(0, remapped_regs + mmSC_TOP_LEFT);
	writel((par->yres << 16) | par->xres, remapped_regs + mmSC_BOTTOM_RIGHT);
	writel(0x1fff1fff, remapped_regs + mmSRC_SC_BOTTOM_RIGHT);

	w100_fifo_wait(4);
dp_cntl.val = 0; dp_cntl.f.dst_x_dir = 1; dp_cntl.f.dst_y_dir = 1; dp_cntl.f.src_x_dir = 1; dp_cntl.f.src_y_dir = 1; dp_cntl.f.dst_major_x = 1; dp_cntl.f.src_major_x = 1; writel(dp_cntl.val, remapped_regs + mmDP_CNTL); gmc.val = 0; gmc.f.gmc_src_pitch_offset_cntl = 1; gmc.f.gmc_dst_pitch_offset_cntl = 1; gmc.f.gmc_src_clipping = 1; gmc.f.gmc_dst_clipping = 1; gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE; gmc.f.gmc_dst_datatype = 3; /* from DstType_16Bpp_444 */ gmc.f.gmc_src_datatype = SRC_DATATYPE_EQU_DST; gmc.f.gmc_byte_pix_order = 1; gmc.f.gmc_default_sel = 0; gmc.f.gmc_rop3 = ROP3_SRCCOPY; gmc.f.gmc_dp_src_source = DP_SRC_MEM_RECTANGULAR; gmc.f.gmc_clr_cmp_fcn_dis = 1; gmc.f.gmc_wr_msk_dis = 1; gmc.f.gmc_dp_op = DP_OP_ROP; writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL); dp_datatype.val = dp_mix.val = 0; dp_datatype.f.dp_dst_datatype = gmc.f.gmc_dst_datatype; dp_datatype.f.dp_brush_datatype = gmc.f.gmc_brush_datatype; dp_datatype.f.dp_src2_type = 0; dp_datatype.f.dp_src2_datatype = gmc.f.gmc_src_datatype; dp_datatype.f.dp_src_datatype = gmc.f.gmc_src_datatype; dp_datatype.f.dp_byte_pix_order = gmc.f.gmc_byte_pix_order; writel(dp_datatype.val, remapped_regs + mmDP_DATATYPE); dp_mix.f.dp_src_source = gmc.f.gmc_dp_src_source; dp_mix.f.dp_src2_source = 1; dp_mix.f.dp_rop3 = gmc.f.gmc_rop3; dp_mix.f.dp_op = gmc.f.gmc_dp_op; writel(dp_mix.val, remapped_regs + mmDP_MIX); } static void w100fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { union dp_gui_master_cntl_u gmc; if (info->state != FBINFO_STATE_RUNNING) return; if (info->flags & FBINFO_HWACCEL_DISABLED) { cfb_fillrect(info, rect); return; } gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL); gmc.f.gmc_rop3 = ROP3_PATCOPY; gmc.f.gmc_brush_datatype = GMC_BRUSH_SOLID_COLOR; w100_fifo_wait(2); writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL); writel(rect->color, remapped_regs + mmDP_BRUSH_FRGD_CLR); w100_fifo_wait(2); writel((rect->dy << 16) | (rect->dx & 0xffff), remapped_regs + 
mmDST_Y_X);
	writel((rect->width << 16) | (rect->height & 0xffff), remapped_regs + mmDST_WIDTH_HEIGHT);
}


/* Hardware-accelerated screen-to-screen copy (fbdev copyarea hook). */
static void w100fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
	u32 h = area->height, w = area->width;
	union dp_gui_master_cntl_u gmc;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	/* Fall back to the software implementation if acceleration is off */
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, area);
		return;
	}

	gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
	gmc.f.gmc_rop3 = ROP3_SRCCOPY;
	gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
	w100_fifo_wait(1);
	writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);

	w100_fifo_wait(3);
	writel((sy << 16) | (sx & 0xffff), remapped_regs + mmSRC_Y_X);
	writel((dy << 16) | (dx & 0xffff), remapped_regs + mmDST_Y_X);
	writel((w << 16) | (h & 0xffff), remapped_regs + mmDST_WIDTH_HEIGHT);
}


/*
 * Change the resolution by calling the appropriate hardware functions
 */
static void w100fb_activate_var(struct w100fb_par *par)
{
	struct w100_tg_info *tg = par->mach->tg;

	w100_pwm_setup(par);
	w100_setup_memory(par);
	w100_init_clocks(par);
	w100fb_clear_screen(par);
	w100_vsync();

	/* Reprogram the LCD controller with display updates disabled */
	w100_update_disable();
	w100_init_lcd(par);
	w100_set_dispregs(par);
	w100_update_enable();
	w100_init_graphic_engine(par);

	calc_hsync(par);

	/* Notify the board hook unless the panel is blanked */
	if (!par->blanked && tg && tg->change)
		tg->change(par);
}


/* Select the smallest mode that allows the desired resolution to be
 * displayed. If desired, the x and y parameters can be rounded up to
 * match the selected mode.
 */
static struct w100_mode *w100fb_get_mode(struct w100fb_par *par, unsigned int *x, unsigned int *y, int saveval)
{
	struct w100_mode *mode = NULL;
	struct w100_mode *modelist = par->mach->modelist;
	unsigned int best_x = 0xffffffff, best_y = 0xffffffff;
	unsigned int i;

	for (i = 0 ; i < par->mach->num_modes ; i++) {
		/* Natural orientation: mode must contain x*y, prefer smallest */
		if (modelist[i].xres >= *x && modelist[i].yres >= *y &&
				modelist[i].xres < best_x && modelist[i].yres < best_y) {
			best_x = modelist[i].xres;
			best_y = modelist[i].yres;
			mode = &modelist[i];
		/* Rotated orientation: mode contains y*x, report swapped size */
		} else if(modelist[i].xres >= *y && modelist[i].yres >= *x &&
				modelist[i].xres < best_y && modelist[i].yres < best_x) {
			best_x = modelist[i].yres;
			best_y = modelist[i].xres;
			mode = &modelist[i];
		}
	}

	/* Optionally round the requested size up to the selected mode */
	if (mode && saveval) {
		*x = best_x;
		*y = best_y;
	}

	return mode;
}


/*
 * w100fb_check_var():
 * Get the video params out of 'var'. If a value doesn't fit, round it up,
 * if it's too big, return -EINVAL.
 */
static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct w100fb_par *par=info->par;

	/* Must match one of the board's modes (possibly rotated) */
	if(!w100fb_get_mode(par, &var->xres, &var->yres, 1))
		return -EINVAL;

	/* Reject resolutions that don't fit in external video memory ... */
	if (par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (par->mach->mem->size+1)))
		return -EINVAL;

	/* ... or in internal memory when no external memory is fitted */
	if (!par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)))
		return -EINVAL;

	var->xres_virtual = max(var->xres_virtual, var->xres);
	var->yres_virtual = max(var->yres_virtual, var->yres);

	/* Hardware only supports 16bpp RGB565 */
	if (var->bits_per_pixel > BITS_PER_PIXEL)
		return -EINVAL;
	else
		var->bits_per_pixel = BITS_PER_PIXEL;

	var->red.offset = 11;
	var->red.length = 5;
	var->green.offset = 5;
	var->green.length = 6;
	var->blue.offset = 0;
	var->blue.length = 5;
	var->transp.offset = var->transp.length = 0;

	var->nonstd = 0;
	var->height = -1;
	var->width = -1;
	var->vmode = FB_VMODE_NONINTERLACED;
	var->sync = 0;
	var->pixclock = 0x04;  /* 171521; */

	return 0;
}


/*
 * w100fb_set_par():
 * Set the user defined part of the display for the specified console
 * by looking at the values in info.var
 */
static int 
w100fb_set_par(struct fb_info *info) { struct w100fb_par *par=info->par; if (par->xres != info->var.xres || par->yres != info->var.yres) { par->xres = info->var.xres; par->yres = info->var.yres; par->mode = w100fb_get_mode(par, &par->xres, &par->yres, 0); info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.ypanstep = 0; info->fix.ywrapstep = 0; info->fix.line_length = par->xres * BITS_PER_PIXEL / 8; mutex_lock(&info->mm_lock); if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) { par->extmem_active = 1; info->fix.smem_len = par->mach->mem->size+1; } else { par->extmem_active = 0; info->fix.smem_len = MEM_INT_SIZE+1; } mutex_unlock(&info->mm_lock); w100fb_activate_var(par); } return 0; } /* * Frame buffer operations */ static struct fb_ops w100fb_ops = { .owner = THIS_MODULE, .fb_check_var = w100fb_check_var, .fb_set_par = w100fb_set_par, .fb_setcolreg = w100fb_setcolreg, .fb_blank = w100fb_blank, .fb_fillrect = w100fb_fillrect, .fb_copyarea = w100fb_copyarea, .fb_imageblit = cfb_imageblit, .fb_sync = w100fb_sync, }; #ifdef CONFIG_PM static void w100fb_save_vidmem(struct w100fb_par *par) { int memsize; if (par->extmem_active) { memsize=par->mach->mem->size; par->saved_extmem = vmalloc(memsize); if (par->saved_extmem) memcpy_fromio(par->saved_extmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize); } memsize=MEM_INT_SIZE; par->saved_intmem = vmalloc(memsize); if (par->saved_intmem && par->extmem_active) memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), memsize); else if (par->saved_intmem) memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize); } static void w100fb_restore_vidmem(struct w100fb_par *par) { int memsize; if (par->extmem_active && par->saved_extmem) { memsize=par->mach->mem->size; memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize); vfree(par->saved_extmem); } if (par->saved_intmem) { memsize=MEM_INT_SIZE; if (par->extmem_active) 
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), par->saved_intmem, memsize); else memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize); vfree(par->saved_intmem); } } static int w100fb_suspend(struct platform_device *dev, pm_message_t state) { struct fb_info *info = platform_get_drvdata(dev); struct w100fb_par *par=info->par; struct w100_tg_info *tg = par->mach->tg; w100fb_save_vidmem(par); if(tg && tg->suspend) tg->suspend(par); w100_suspend(W100_SUSPEND_ALL); par->blanked = 1; return 0; } static int w100fb_resume(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); struct w100fb_par *par=info->par; struct w100_tg_info *tg = par->mach->tg; w100_hw_init(par); w100fb_activate_var(par); w100fb_restore_vidmem(par); if(tg && tg->resume) tg->resume(par); par->blanked = 0; return 0; } #else #define w100fb_suspend NULL #define w100fb_resume NULL #endif int __devinit w100fb_probe(struct platform_device *pdev) { int err = -EIO; struct w100fb_mach_info *inf; struct fb_info *info = NULL; struct w100fb_par *par; struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); unsigned int chip_id; if (!mem) return -EINVAL; /* Remap the chip base address */ remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN); if (remapped_base == NULL) goto out; /* Map the register space */ remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN); if (remapped_regs == NULL) goto out; /* Identify the chip */ printk("Found "); chip_id = readl(remapped_regs + mmCHIP_ID); switch(chip_id) { case CHIP_ID_W100: printk("w100"); break; case CHIP_ID_W3200: printk("w3200"); break; case CHIP_ID_W3220: printk("w3220"); break; default: printk("Unknown imageon chip ID\n"); err = -ENODEV; goto out; } printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE); /* Remap the framebuffer */ remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE); if (remapped_fbuf == 
NULL) goto out; info=framebuffer_alloc(sizeof(struct w100fb_par), &pdev->dev); if (!info) { err = -ENOMEM; goto out; } par = info->par; platform_set_drvdata(pdev, info); inf = pdev->dev.platform_data; par->chip_id = chip_id; par->mach = inf; par->fastpll_mode = 0; par->blanked = 0; par->pll_table=w100_get_xtal_table(inf->xtal_freq); if (!par->pll_table) { printk(KERN_ERR "No matching Xtal definition found\n"); err = -EINVAL; goto out; } info->pseudo_palette = kmalloc(sizeof (u32) * MAX_PALETTES, GFP_KERNEL); if (!info->pseudo_palette) { err = -ENOMEM; goto out; } info->fbops = &w100fb_ops; info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; info->node = -1; info->screen_base = remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE); info->screen_size = REMAPPED_FB_LEN; strcpy(info->fix.id, "w100fb"); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.type_aux = 0; info->fix.accel = FB_ACCEL_NONE; info->fix.smem_start = mem->start+W100_FB_BASE; info->fix.mmio_start = mem->start+W100_REG_BASE; info->fix.mmio_len = W100_REG_LEN; if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { err = -ENOMEM; goto out; } par->mode = &inf->modelist[0]; if(inf->init_mode & INIT_MODE_ROTATED) { info->var.xres = par->mode->yres; info->var.yres = par->mode->xres; } else { info->var.xres = par->mode->xres; info->var.yres = par->mode->yres; } if(inf->init_mode &= INIT_MODE_FLIPPED) par->flip = 1; else par->flip = 0; info->var.xres_virtual = info->var.xres; info->var.yres_virtual = info->var.yres; info->var.pixclock = 0x04; /* 171521; */ info->var.sync = 0; info->var.grayscale = 0; info->var.xoffset = info->var.yoffset = 0; info->var.accel_flags = 0; info->var.activate = FB_ACTIVATE_NOW; w100_hw_init(par); if (w100fb_check_var(&info->var, info) < 0) { err = -EINVAL; goto out; } if (register_framebuffer(info) < 0) { err = -EINVAL; goto out; } err = device_create_file(&pdev->dev, &dev_attr_fastpllclk); err |= device_create_file(&pdev->dev, &dev_attr_reg_read); err |= 
device_create_file(&pdev->dev, &dev_attr_reg_write); err |= device_create_file(&pdev->dev, &dev_attr_flip); if (err != 0) printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n", info->node, err); printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); return 0; out: if (info) { fb_dealloc_cmap(&info->cmap); kfree(info->pseudo_palette); } if (remapped_fbuf != NULL) iounmap(remapped_fbuf); if (remapped_regs != NULL) iounmap(remapped_regs); if (remapped_base != NULL) iounmap(remapped_base); if (info) framebuffer_release(info); return err; } static int __devexit w100fb_remove(struct platform_device *pdev) { struct fb_info *info = platform_get_drvdata(pdev); struct w100fb_par *par=info->par; device_remove_file(&pdev->dev, &dev_attr_fastpllclk); device_remove_file(&pdev->dev, &dev_attr_reg_read); device_remove_file(&pdev->dev, &dev_attr_reg_write); device_remove_file(&pdev->dev, &dev_attr_flip); unregister_framebuffer(info); vfree(par->saved_intmem); vfree(par->saved_extmem); kfree(info->pseudo_palette); fb_dealloc_cmap(&info->cmap); iounmap(remapped_base); iounmap(remapped_regs); iounmap(remapped_fbuf); framebuffer_release(info); return 0; } /* ------------------- chipset specific functions -------------------------- */ static void w100_soft_reset(void) { u16 val = readw((u16 *) remapped_base + cfgSTATUS); writew(val | 0x08, (u16 *) remapped_base + cfgSTATUS); udelay(100); writew(0x00, (u16 *) remapped_base + cfgSTATUS); udelay(100); } static void w100_update_disable(void) { union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl; /* Prevent display updates */ disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e; disp_db_buf_wr_cntl.f.update_db_buf = 0; disp_db_buf_wr_cntl.f.en_db_buf = 0; writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL); } static void w100_update_enable(void) { union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl; /* Enable display updates */ disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e; disp_db_buf_wr_cntl.f.update_db_buf 
= 1; disp_db_buf_wr_cntl.f.en_db_buf = 1; writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL); } unsigned long w100fb_gpio_read(int port) { unsigned long value; if (port==W100_GPIO_PORT_A) value = readl(remapped_regs + mmGPIO_DATA); else value = readl(remapped_regs + mmGPIO_DATA2); return value; } void w100fb_gpio_write(int port, unsigned long value) { if (port==W100_GPIO_PORT_A) writel(value, remapped_regs + mmGPIO_DATA); else writel(value, remapped_regs + mmGPIO_DATA2); } EXPORT_SYMBOL(w100fb_gpio_read); EXPORT_SYMBOL(w100fb_gpio_write); /* * Initialization of critical w100 hardware */ static void w100_hw_init(struct w100fb_par *par) { u32 temp32; union cif_cntl_u cif_cntl; union intf_cntl_u intf_cntl; union cfgreg_base_u cfgreg_base; union wrap_top_dir_u wrap_top_dir; union cif_read_dbg_u cif_read_dbg; union cpu_defaults_u cpu_default; union cif_write_dbg_u cif_write_dbg; union wrap_start_dir_u wrap_start_dir; union cif_io_u cif_io; struct w100_gpio_regs *gpio = par->mach->gpio; w100_soft_reset(); /* This is what the fpga_init code does on reset. 
May be wrong but there is little info available */ writel(0x31, remapped_regs + mmSCRATCH_UMSK); for (temp32 = 0; temp32 < 10000; temp32++) readl(remapped_regs + mmSCRATCH_UMSK); writel(0x30, remapped_regs + mmSCRATCH_UMSK); /* Set up CIF */ cif_io.val = defCIF_IO; writel((u32)(cif_io.val), remapped_regs + mmCIF_IO); cif_write_dbg.val = readl(remapped_regs + mmCIF_WRITE_DBG); cif_write_dbg.f.dis_packer_ful_during_rbbm_timeout = 0; cif_write_dbg.f.en_dword_split_to_rbbm = 1; cif_write_dbg.f.dis_timeout_during_rbbm = 1; writel((u32) (cif_write_dbg.val), remapped_regs + mmCIF_WRITE_DBG); cif_read_dbg.val = readl(remapped_regs + mmCIF_READ_DBG); cif_read_dbg.f.dis_rd_same_byte_to_trig_fetch = 1; writel((u32) (cif_read_dbg.val), remapped_regs + mmCIF_READ_DBG); cif_cntl.val = readl(remapped_regs + mmCIF_CNTL); cif_cntl.f.dis_system_bits = 1; cif_cntl.f.dis_mr = 1; cif_cntl.f.en_wait_to_compensate_dq_prop_dly = 0; cif_cntl.f.intb_oe = 1; cif_cntl.f.interrupt_active_high = 1; writel((u32) (cif_cntl.val), remapped_regs + mmCIF_CNTL); /* Setup cfgINTF_CNTL and cfgCPU defaults */ intf_cntl.val = defINTF_CNTL; intf_cntl.f.ad_inc_a = 1; intf_cntl.f.ad_inc_b = 1; intf_cntl.f.rd_data_rdy_a = 0; intf_cntl.f.rd_data_rdy_b = 0; writeb((u8) (intf_cntl.val), remapped_base + cfgINTF_CNTL); cpu_default.val = defCPU_DEFAULTS; cpu_default.f.access_ind_addr_a = 1; cpu_default.f.access_ind_addr_b = 1; cpu_default.f.access_scratch_reg = 1; cpu_default.f.transition_size = 0; writeb((u8) (cpu_default.val), remapped_base + cfgCPU_DEFAULTS); /* set up the apertures */ writeb((u8) (W100_REG_BASE >> 16), remapped_base + cfgREG_BASE); cfgreg_base.val = defCFGREG_BASE; cfgreg_base.f.cfgreg_base = W100_CFG_BASE; writel((u32) (cfgreg_base.val), remapped_regs + mmCFGREG_BASE); wrap_start_dir.val = defWRAP_START_DIR; wrap_start_dir.f.start_addr = WRAP_BUF_BASE_VALUE >> 1; writel((u32) (wrap_start_dir.val), remapped_regs + mmWRAP_START_DIR); wrap_top_dir.val = defWRAP_TOP_DIR; wrap_top_dir.f.top_addr = 
WRAP_BUF_TOP_VALUE >> 1; writel((u32) (wrap_top_dir.val), remapped_regs + mmWRAP_TOP_DIR); writel((u32) 0x2440, remapped_regs + mmRBBM_CNTL); /* Set the hardware to 565 colour */ temp32 = readl(remapped_regs + mmDISP_DEBUG2); temp32 &= 0xff7fffff; temp32 |= 0x00800000; writel(temp32, remapped_regs + mmDISP_DEBUG2); /* Initialise the GPIO lines */ if (gpio) { writel(gpio->init_data1, remapped_regs + mmGPIO_DATA); writel(gpio->init_data2, remapped_regs + mmGPIO_DATA2); writel(gpio->gpio_dir1, remapped_regs + mmGPIO_CNTL1); writel(gpio->gpio_oe1, remapped_regs + mmGPIO_CNTL2); writel(gpio->gpio_dir2, remapped_regs + mmGPIO_CNTL3); writel(gpio->gpio_oe2, remapped_regs + mmGPIO_CNTL4); } } struct power_state { union clk_pin_cntl_u clk_pin_cntl; union pll_ref_fb_div_u pll_ref_fb_div; union pll_cntl_u pll_cntl; union sclk_cntl_u sclk_cntl; union pclk_cntl_u pclk_cntl; union pwrmgt_cntl_u pwrmgt_cntl; int auto_mode; /* system clock auto changing? */ }; static struct power_state w100_pwr_state; /* The PLL Fout is determined by (XtalFreq/(M+1)) * ((N_int+1) + (N_fac/8)) */ /* 12.5MHz Crystal PLL Table */ static struct w100_pll_info xtal_12500000[] = { /*freq M N_int N_fac tfgoal lock_time */ { 50, 0, 1, 0, 0xe0, 56}, /* 50.00 MHz */ { 75, 0, 5, 0, 0xde, 37}, /* 75.00 MHz */ {100, 0, 7, 0, 0xe0, 28}, /* 100.00 MHz */ {125, 0, 9, 0, 0xe0, 22}, /* 125.00 MHz */ {150, 0, 11, 0, 0xe0, 17}, /* 150.00 MHz */ { 0, 0, 0, 0, 0, 0}, /* Terminator */ }; /* 14.318MHz Crystal PLL Table */ static struct w100_pll_info xtal_14318000[] = { /*freq M N_int N_fac tfgoal lock_time */ { 40, 4, 13, 0, 0xe0, 80}, /* tfgoal guessed */ { 50, 1, 6, 0, 0xe0, 64}, /* 50.05 MHz */ { 57, 2, 11, 0, 0xe0, 53}, /* tfgoal guessed */ { 75, 0, 4, 3, 0xe0, 43}, /* 75.08 MHz */ {100, 0, 6, 0, 0xe0, 32}, /* 100.10 MHz */ { 0, 0, 0, 0, 0, 0}, }; /* 16MHz Crystal PLL Table */ static struct w100_pll_info xtal_16000000[] = { /*freq M N_int N_fac tfgoal lock_time */ { 72, 1, 8, 0, 0xe0, 48}, /* tfgoal guessed */ { 80, 
1, 9, 0, 0xe0, 13}, /* tfgoal guessed */ { 95, 1, 10, 7, 0xe0, 38}, /* tfgoal guessed */ { 96, 1, 11, 0, 0xe0, 36}, /* tfgoal guessed */ { 0, 0, 0, 0, 0, 0}, }; static struct pll_entries { int xtal_freq; struct w100_pll_info *pll_table; } w100_pll_tables[] = { { 12500000, &xtal_12500000[0] }, { 14318000, &xtal_14318000[0] }, { 16000000, &xtal_16000000[0] }, { 0 }, }; struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq) { struct pll_entries *pll_entry = w100_pll_tables; do { if (freq == pll_entry->xtal_freq) return pll_entry->pll_table; pll_entry++; } while (pll_entry->xtal_freq); return 0; } static unsigned int w100_get_testcount(unsigned int testclk_sel) { union clk_test_cntl_u clk_test_cntl; udelay(5); /* Select the test clock source and reset */ clk_test_cntl.f.start_check_freq = 0x0; clk_test_cntl.f.testclk_sel = testclk_sel; clk_test_cntl.f.tstcount_rst = 0x1; /* set reset */ writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); clk_test_cntl.f.tstcount_rst = 0x0; /* clear reset */ writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); /* Run clock test */ clk_test_cntl.f.start_check_freq = 0x1; writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); /* Give the test time to complete */ udelay(20); /* Return the result */ clk_test_cntl.val = readl(remapped_regs + mmCLK_TEST_CNTL); clk_test_cntl.f.start_check_freq = 0x0; writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); return clk_test_cntl.f.test_count; } static int w100_pll_adjust(struct w100_pll_info *pll) { unsigned int tf80; unsigned int tf20; /* Initial Settings */ w100_pwr_state.pll_cntl.f.pll_pwdn = 0x0; /* power down */ w100_pwr_state.pll_cntl.f.pll_reset = 0x0; /* not reset */ w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x1; /* Hi-Z */ w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; /* VCO gain = 0 */ w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0; /* VCO frequency range control = off */ w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; /* current 
offset inside VCO = 0 */ w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0; /* Wai Ming 80 percent of VDD 1.3V gives 1.04V, minimum operating voltage is 1.08V * therefore, commented out the following lines * tf80 meant tf100 */ do { /* set VCO input = 0.8 * VDD */ w100_pwr_state.pll_cntl.f.pll_dactal = 0xd; writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); tf80 = w100_get_testcount(TESTCLK_SRC_PLL); if (tf80 >= (pll->tfgoal)) { /* set VCO input = 0.2 * VDD */ w100_pwr_state.pll_cntl.f.pll_dactal = 0x7; writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); tf20 = w100_get_testcount(TESTCLK_SRC_PLL); if (tf20 <= (pll->tfgoal)) return 1; /* Success */ if ((w100_pwr_state.pll_cntl.f.pll_vcofr == 0x0) && ((w100_pwr_state.pll_cntl.f.pll_pvg == 0x7) || (w100_pwr_state.pll_cntl.f.pll_ioffset == 0x0))) { /* slow VCO config */ w100_pwr_state.pll_cntl.f.pll_vcofr = 0x1; w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; continue; } } if ((w100_pwr_state.pll_cntl.f.pll_ioffset) < 0x3) { w100_pwr_state.pll_cntl.f.pll_ioffset += 0x1; } else if ((w100_pwr_state.pll_cntl.f.pll_pvg) < 0x7) { w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; w100_pwr_state.pll_cntl.f.pll_pvg += 0x1; } else { return 0; /* Error */ } } while(1); } /* * w100_pll_calibration */ static int w100_pll_calibration(struct w100_pll_info *pll) { int status; status = w100_pll_adjust(pll); /* PLL Reset And Lock */ /* set VCO input = 0.5 * VDD */ w100_pwr_state.pll_cntl.f.pll_dactal = 0xa; writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); udelay(1); /* reset time */ /* enable charge pump */ w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0; /* normal */ writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); /* set VCO input = Hi-Z, disable DAC */ w100_pwr_state.pll_cntl.f.pll_dactal = 0x0; writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); udelay(400); /* lock time */ /* PLL locked */ return 
status; } static int w100_pll_set_clk(struct w100_pll_info *pll) { int status; if (w100_pwr_state.auto_mode == 1) /* auto mode */ { w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0; /* disable fast to normal */ w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0; /* disable normal to fast */ writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); } /* Set system clock source to XTAL whilst adjusting the PLL! */ w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL; writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL); w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = pll->M; w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = pll->N_int; w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = pll->N_fac; w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = pll->lock_time; writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV); w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0; writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); status = w100_pll_calibration(pll); if (w100_pwr_state.auto_mode == 1) /* auto mode */ { w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x1; /* reenable fast to normal */ w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x1; /* reenable normal to fast */ writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); } return status; } /* freq = target frequency of the PLL */ static int w100_set_pll_freq(struct w100fb_par *par, unsigned int freq) { struct w100_pll_info *pll = par->pll_table; do { if (freq == pll->freq) { return w100_pll_set_clk(pll); } pll++; } while(pll->freq); return 0; } /* Set up an initial state. Some values/fields set here will be overwritten. 
*/
/*
 * Program the chip's clock/PLL/power-management registers into a known
 * "manual" baseline state.  All writes go through the w100_pwr_state
 * shadow copy so later code can modify individual bit-fields without
 * read-modify-write cycles on the hardware.
 */
static void w100_pwm_setup(struct w100fb_par *par)
{
	/* Run from the crystal oscillator; double XTALIN only if the
	 * machine info requests it. */
	w100_pwr_state.clk_pin_cntl.f.osc_en = 0x1;
	w100_pwr_state.clk_pin_cntl.f.osc_gain = 0x1f;
	w100_pwr_state.clk_pin_cntl.f.dont_use_xtalin = 0x0;
	w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x0;
	w100_pwr_state.clk_pin_cntl.f.xtalin_dbl_en = par->mach->xtal_dbl ? 1 : 0;
	w100_pwr_state.clk_pin_cntl.f.cg_debug = 0x0;
	writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL);

	/* System clock: source from the crystal, no post-division, and let
	 * all the per-unit clock gates run dynamically. */
	w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
	w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = 0x0;	/* Pfast = 1 */
	w100_pwr_state.sclk_cntl.f.sclk_clkon_hys = 0x3;
	w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0;	/* Pslow = 1 */
	w100_pwr_state.sclk_cntl.f.disp_cg_ok2switch_en = 0x0;
	w100_pwr_state.sclk_cntl.f.sclk_force_reg = 0x0;	/* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_disp = 0x0;	/* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_mc = 0x0;		/* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_extmc = 0x0;	/* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_cp = 0x0;		/* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_e2 = 0x0;		/* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_e3 = 0x0;		/* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_idct = 0x0;	/* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_bist = 0x0;	/* Dynamic */
	w100_pwr_state.sclk_cntl.f.busy_extend_cp = 0x0;
	w100_pwr_state.sclk_cntl.f.busy_extend_e2 = 0x0;
	w100_pwr_state.sclk_cntl.f.busy_extend_e3 = 0x0;
	w100_pwr_state.sclk_cntl.f.busy_extend_idct = 0x0;
	writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);

	/* Pixel clock: crystal source, divide-by-two. */
	w100_pwr_state.pclk_cntl.f.pclk_src_sel = CLK_SRC_XTAL;
	w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x1;		/* P = 2 */
	w100_pwr_state.pclk_cntl.f.pclk_force_disp = 0x0;	/* Dynamic */
	writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);

	/* PLL reference/feedback dividers at unity; timing fields are the
	 * reset/lock delays used when the PLL is later enabled. */
	w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = 0x0;	/* M = 1 */
	w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = 0x0;	/* N = 1.0 */
	w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = 0x0;
	w100_pwr_state.pll_ref_fb_div.f.pll_reset_time = 0x5;
	w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = 0xff;
	writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV);

	/* Leave the PLL powered down and held in reset until a mode that
	 * needs it is selected (see w100_init_clocks). */
	w100_pwr_state.pll_cntl.f.pll_pwdn = 0x1;
	w100_pwr_state.pll_cntl.f.pll_reset = 0x1;
	w100_pwr_state.pll_cntl.f.pll_pm_en = 0x0;
	w100_pwr_state.pll_cntl.f.pll_mode = 0x0;	/* uses VCO clock */
	w100_pwr_state.pll_cntl.f.pll_refclk_sel = 0x0;
	w100_pwr_state.pll_cntl.f.pll_fbclk_sel = 0x0;
	w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0;
	w100_pwr_state.pll_cntl.f.pll_pcp = 0x4;
	w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;
	w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0;
	w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
	w100_pwr_state.pll_cntl.f.pll_pecc_mode = 0x0;
	w100_pwr_state.pll_cntl.f.pll_pecc_scon = 0x0;
	w100_pwr_state.pll_cntl.f.pll_dactal = 0x0;	/* Hi-Z */
	w100_pwr_state.pll_cntl.f.pll_cp_clip = 0x3;
	w100_pwr_state.pll_cntl.f.pll_conf = 0x2;
	w100_pwr_state.pll_cntl.f.pll_mbctrl = 0x2;
	w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
	writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);

	/* Power management: disabled, with maximum idle/busy timers. */
	w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x0;
	w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1;	/* normal mode (0, 1, 3) */
	w100_pwr_state.pwrmgt_cntl.f.pwm_wakeup_cond = 0x0;
	w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0;
	w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0;
	w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_cond = 0x1;	/* PM4,ENG */
	w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_cond = 0x1;	/* PM4,ENG */
	w100_pwr_state.pwrmgt_cntl.f.pwm_idle_timer = 0xFF;
	w100_pwr_state.pwrmgt_cntl.f.pwm_busy_timer = 0xFF;
	writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);

	w100_pwr_state.auto_mode = 0;	/* manual mode */
}

/*
 * Setup the w100 clocks for the specified mode
 */
static void w100_init_clocks(struct w100fb_par *par)
{
	struct w100_mode *mode = par->mode;

	/* Only spin up the PLL when either clock actually sources from it;
	 * prefer the fast PLL frequency when fastpll_mode is active. */
	if (mode->pixclk_src == CLK_SRC_PLL || mode->sysclk_src == CLK_SRC_PLL)
		w100_set_pll_freq(par, (par->fastpll_mode && mode->fast_pll_freq) ? mode->fast_pll_freq : mode->pll_freq);

	w100_pwr_state.sclk_cntl.f.sclk_src_sel = mode->sysclk_src;
	w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = mode->sysclk_divider;
	w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = mode->sysclk_divider;
	writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
}

/*
 * Program the CRTC/LCD timing registers from the current mode and the
 * machine-supplied general register values.
 */
static void w100_init_lcd(struct w100fb_par *par)
{
	u32 temp32;
	struct w100_mode *mode = par->mode;
	struct w100_gen_regs *regs = par->mach->regs;
	union active_h_disp_u active_h_disp;
	union active_v_disp_u active_v_disp;
	union graphic_h_disp_u graphic_h_disp;
	union graphic_v_disp_u graphic_v_disp;
	union crtc_total_u crtc_total;

	/* w3200 doesn't like undefined bits being set so zero register values first */

	active_h_disp.val = 0;
	active_h_disp.f.active_h_start=mode->left_margin;
	active_h_disp.f.active_h_end=mode->left_margin + mode->xres;
	writel(active_h_disp.val, remapped_regs + mmACTIVE_H_DISP);

	active_v_disp.val = 0;
	active_v_disp.f.active_v_start=mode->upper_margin;
	active_v_disp.f.active_v_end=mode->upper_margin + mode->yres;
	writel(active_v_disp.val, remapped_regs + mmACTIVE_V_DISP);

	graphic_h_disp.val = 0;
	graphic_h_disp.f.graphic_h_start=mode->left_margin;
	graphic_h_disp.f.graphic_h_end=mode->left_margin + mode->xres;
	writel(graphic_h_disp.val, remapped_regs + mmGRAPHIC_H_DISP);

	graphic_v_disp.val = 0;
	graphic_v_disp.f.graphic_v_start=mode->upper_margin;
	graphic_v_disp.f.graphic_v_end=mode->upper_margin + mode->yres;
	writel(graphic_v_disp.val, remapped_regs + mmGRAPHIC_V_DISP);

	crtc_total.val = 0;
	crtc_total.f.crtc_h_total=mode->left_margin + mode->xres + mode->right_margin;
	crtc_total.f.crtc_v_total=mode->upper_margin + mode->yres + mode->lower_margin;
	writel(crtc_total.val, remapped_regs + mmCRTC_TOTAL);

	/* Mode-specific CRTC signal timing, then the machine's fixed LCD
	 * interface configuration. */
	writel(mode->crtc_ss, remapped_regs + mmCRTC_SS);
	writel(mode->crtc_ls, remapped_regs + mmCRTC_LS);
	writel(mode->crtc_gs, remapped_regs + mmCRTC_GS);
	writel(mode->crtc_vpos_gs, remapped_regs + mmCRTC_VPOS_GS);
	writel(mode->crtc_rev, remapped_regs + mmCRTC_REV);
	writel(mode->crtc_dclk, remapped_regs + mmCRTC_DCLK);
	writel(mode->crtc_gclk, remapped_regs + mmCRTC_GCLK);
	writel(mode->crtc_goe, remapped_regs + mmCRTC_GOE);
	writel(mode->crtc_ps1_active, remapped_regs + mmCRTC_PS1_ACTIVE);

	writel(regs->lcd_format, remapped_regs + mmLCD_FORMAT);
	writel(regs->lcdd_cntl1, remapped_regs + mmLCDD_CNTL1);
	writel(regs->lcdd_cntl2, remapped_regs + mmLCDD_CNTL2);
	writel(regs->genlcd_cntl1, remapped_regs + mmGENLCD_CNTL1);
	writel(regs->genlcd_cntl2, remapped_regs + mmGENLCD_CNTL2);
	writel(regs->genlcd_cntl3, remapped_regs + mmGENLCD_CNTL3);

	writel(0x00000000, remapped_regs + mmCRTC_FRAME);
	writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS);
	writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT);
	writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR);

	/* Hack for overlay in ext memory */
	temp32 = readl(remapped_regs + mmDISP_DEBUG2);
	temp32 |= 0xc0000000;
	writel(temp32, remapped_regs + mmDISP_DEBUG2);
}

/*
 * Map either internal or external memory at the framebuffer base address
 * and, for external memory, run the SDRAM initialisation sequence.
 */
static void w100_setup_memory(struct w100fb_par *par)
{
	union mc_ext_mem_location_u extmem_location;
	union mc_fb_location_u intmem_location;
	struct w100_mem_info *mem = par->mach->mem;
	struct w100_bm_mem_info *bm_mem = par->mach->bm_mem;

	if (!par->extmem_active) {
		w100_suspend(W100_SUSPEND_EXTMEM);

		/* Map Internal Memory at FB Base */
		intmem_location.f.mc_fb_start = W100_FB_BASE >> 8;
		intmem_location.f.mc_fb_top = (W100_FB_BASE+MEM_INT_SIZE) >> 8;
		writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);

		/* Unmap External Memory - value is *probably* irrelevant but may have
		   meaning to acceleration libraries */
		extmem_location.f.mc_ext_mem_start = MEM_EXT_BASE_VALUE >> 8;
		extmem_location.f.mc_ext_mem_top = (MEM_EXT_BASE_VALUE-1) >> 8;
		writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
	} else {
		/* Map Internal Memory to its default location */
		intmem_location.f.mc_fb_start = MEM_INT_BASE_VALUE >> 8;
		intmem_location.f.mc_fb_top = (MEM_INT_BASE_VALUE+MEM_INT_SIZE) >> 8;
		writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);

		/* Map External Memory at FB Base */
		extmem_location.f.mc_ext_mem_start = W100_FB_BASE >> 8;
		extmem_location.f.mc_ext_mem_top = (W100_FB_BASE+par->mach->mem->size) >> 8;
		writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);

		/* External SDRAM bring-up: the udelay()s between the mode
		 * register writes are part of the init sequence. */
		writel(0x00007800, remapped_regs + mmMC_BIST_CTRL);
		writel(mem->ext_cntl, remapped_regs + mmMEM_EXT_CNTL);
		writel(0x00200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
		udelay(100);
		writel(0x80200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
		udelay(100);
		writel(mem->sdram_mode_reg, remapped_regs + mmMEM_SDRAM_MODE_REG);
		udelay(100);
		writel(mem->ext_timing_cntl, remapped_regs + mmMEM_EXT_TIMING_CNTL);
		writel(mem->io_cntl, remapped_regs + mmMEM_IO_CNTL);
		if (bm_mem) {
			writel(bm_mem->ext_mem_bw, remapped_regs + mmBM_EXT_MEM_BANDWIDTH);
			writel(bm_mem->offset, remapped_regs + mmBM_OFFSET);
			writel(bm_mem->ext_timing_ctl, remapped_regs + mmBM_MEM_EXT_TIMING_CNTL);
			writel(bm_mem->ext_cntl, remapped_regs + mmBM_MEM_EXT_CNTL);
			writel(bm_mem->mode_reg, remapped_regs + mmBM_MEM_MODE_REG);
			writel(bm_mem->io_cntl, remapped_regs + mmBM_MEM_IO_CNTL);
			writel(bm_mem->config, remapped_regs + mmBM_CONFIG);
		}
	}
}

/*
 * Program rotation, pixel clock divider, framebuffer offset/pitch and the
 * chip-specific graphic control register for the current display mode.
 */
static void w100_set_dispregs(struct w100fb_par *par)
{
	unsigned long rot=0, divider, offset=0;
	union graphic_ctrl_u graphic_ctrl;

	/* See if the mode has been rotated */
	if (par->xres == par->mode->xres) {
		if (par->flip) {
			rot=3; /* 180 degree */
			offset=(par->xres * par->yres) - 1;
		} /* else 0 degree */
		divider = par->mode->pixclk_divider;
	} else {
		if (par->flip) {
			rot=2; /* 270 degree */
			offset=par->xres - 1;
		} else {
			rot=1; /* 90 degree */
			offset=par->xres * (par->yres - 1);
		}
		divider = par->mode->pixclk_divider_rotated;
	}

	graphic_ctrl.val = 0; /* w32xx doesn't like undefined bits */
	switch (par->chip_id) {
	case CHIP_ID_W100:
		graphic_ctrl.f_w100.color_depth=6;
		graphic_ctrl.f_w100.en_crtc=1;
		graphic_ctrl.f_w100.en_graphic_req=1;
		graphic_ctrl.f_w100.en_graphic_crtc=1;
		graphic_ctrl.f_w100.lcd_pclk_on=1;
		graphic_ctrl.f_w100.lcd_sclk_on=1;
		graphic_ctrl.f_w100.low_power_on=0;
		graphic_ctrl.f_w100.req_freq=0;
		graphic_ctrl.f_w100.portrait_mode=rot;

		/* Zaurus needs this */
		switch(par->xres) {
		case 240:
		case 320:
		default:
			graphic_ctrl.f_w100.total_req_graphic=0xa0;
			break;
		case 480:
		case 640:
			switch(rot) {
			case 0:  /* 0 */
			case 3:  /* 180 */
				graphic_ctrl.f_w100.low_power_on=1;
				graphic_ctrl.f_w100.req_freq=5;
				break;
			case 1:  /* 90 */
			case 2:  /* 270 */
				graphic_ctrl.f_w100.req_freq=4;
				break;
			default:
				break;
			}
			graphic_ctrl.f_w100.total_req_graphic=0xf0;
			break;
		}
		break;
	case CHIP_ID_W3200:
	case CHIP_ID_W3220:
		graphic_ctrl.f_w32xx.color_depth=6;
		graphic_ctrl.f_w32xx.en_crtc=1;
		graphic_ctrl.f_w32xx.en_graphic_req=1;
		graphic_ctrl.f_w32xx.en_graphic_crtc=1;
		graphic_ctrl.f_w32xx.lcd_pclk_on=1;
		graphic_ctrl.f_w32xx.lcd_sclk_on=1;
		graphic_ctrl.f_w32xx.low_power_on=0;
		graphic_ctrl.f_w32xx.req_freq=0;
		graphic_ctrl.f_w32xx.total_req_graphic=par->mode->xres >> 1; /* panel xres, not mode */
		graphic_ctrl.f_w32xx.portrait_mode=rot;
		break;
	}

	/* Set the pixel clock source and divider */
	w100_pwr_state.pclk_cntl.f.pclk_src_sel = par->mode->pixclk_src;
	w100_pwr_state.pclk_cntl.f.pclk_post_div = divider;
	writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);

	writel(graphic_ctrl.val, remapped_regs + mmGRAPHIC_CTRL);
	writel(W100_FB_BASE + ((offset * BITS_PER_PIXEL/8)&~0x03UL), remapped_regs + mmGRAPHIC_OFFSET);
	writel((par->xres*BITS_PER_PIXEL/8), remapped_regs + mmGRAPHIC_PITCH);
}

/*
 * Work out how long the sync pulse lasts
 * Value is 1/(time in seconds)
 */
static void calc_hsync(struct w100fb_par *par)
{
	unsigned long hsync;
	struct w100_mode *mode = par->mode;
	union crtc_ss_u crtc_ss;

	/* Start from the pixel clock source frequency, then apply the
	 * post divider that is currently programmed. */
	if (mode->pixclk_src == CLK_SRC_XTAL)
		hsync=par->mach->xtal_freq;
	else
		hsync=((par->fastpll_mode && mode->fast_pll_freq) ? mode->fast_pll_freq : mode->pll_freq)*100000;

	hsync /= (w100_pwr_state.pclk_cntl.f.pclk_post_div + 1);

	crtc_ss.val = readl(remapped_regs + mmCRTC_SS);
	if (crtc_ss.val)
		par->hsync_len = hsync / (crtc_ss.f.ss_end-crtc_ss.f.ss_start);
	else
		par->hsync_len = 0;
}

/*
 * Put the memory controller into a low-power state.  W100_SUSPEND_EXTMEM
 * tri-states/stops the external memory clocks; any other mode also shuts
 * down the internal clocks and powers the PLL down.
 */
static void w100_suspend(u32 mode)
{
	u32 val;

	writel(0x7FFF8000, remapped_regs + mmMC_EXT_MEM_LOCATION);
	writel(0x00FF0000, remapped_regs + mmMC_PERF_MON_CNTL);

	val = readl(remapped_regs + mmMEM_EXT_TIMING_CNTL);
	val &= ~(0x00100000);  /* bit20=0 */
	val |= 0xFF000000;     /* bit31:24=0xff */
	writel(val, remapped_regs + mmMEM_EXT_TIMING_CNTL);

	val = readl(remapped_regs + mmMEM_EXT_CNTL);
	val &= ~(0x00040000);  /* bit18=0 */
	val |= 0x00080000;     /* bit19=1 */
	writel(val, remapped_regs + mmMEM_EXT_CNTL);

	udelay(1);  /* wait 1us */

	if (mode == W100_SUSPEND_EXTMEM) {
		/* CKE: Tri-State */
		val = readl(remapped_regs + mmMEM_EXT_CNTL);
		val |= 0x40000000;  /* bit30=1 */
		writel(val, remapped_regs + mmMEM_EXT_CNTL);

		/* CLK: Stop */
		val = readl(remapped_regs + mmMEM_EXT_CNTL);
		val &= ~(0x00000001);  /* bit0=0 */
		writel(val, remapped_regs + mmMEM_EXT_CNTL);
	} else {
		writel(0x00000000, remapped_regs + mmSCLK_CNTL);
		writel(0x000000BF, remapped_regs + mmCLK_PIN_CNTL);
		writel(0x00000015, remapped_regs + mmPWRMGT_CNTL);

		udelay(5);

		val = readl(remapped_regs + mmPLL_CNTL);
		val |= 0x00000004;  /* bit2=1 */
		writel(val, remapped_regs + mmPLL_CNTL);
		writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL);
	}
}

/*
 * Busy-wait (up to 30ms) for the next vertical sync using the vline
 * interrupt status bit; the interrupt itself stays disabled throughout.
 */
static void w100_vsync(void)
{
	u32 tmp;
	int timeout = 30000;  /* VSync timeout = 30[ms] > 16.8[ms] */

	tmp = readl(remapped_regs + mmACTIVE_V_DISP);

	/* set vline pos  */
	writel((tmp >> 16) & 0x3ff, remapped_regs + mmDISP_INT_CNTL);

	/* disable vline irq */
	tmp = readl(remapped_regs + mmGEN_INT_CNTL);

	tmp &= ~0x00000002;
	writel(tmp, remapped_regs + mmGEN_INT_CNTL);

	/* clear vline irq status */
	writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);

	/* enable vline irq */
	writel((tmp | 0x00000002), remapped_regs + mmGEN_INT_CNTL);

	/* clear vline irq status (again, immediately before polling) */
	writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);

	/* poll the status bit rather than sleeping on the interrupt */
	while(timeout > 0) {
		if (readl(remapped_regs + mmGEN_INT_STATUS) & 0x00000002)
			break;
		udelay(1);
		timeout--;
	}

	/* disable vline irq */
	writel(tmp, remapped_regs + mmGEN_INT_CNTL);

	/* clear vline irq status */
	writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);
}

/* Platform-device glue: probe/remove/suspend/resume are defined earlier
 * in this file (outside this excerpt). */
static struct platform_driver w100fb_driver = {
	.probe		= w100fb_probe,
	.remove		= __devexit_p(w100fb_remove),
	.suspend	= w100fb_suspend,
	.resume		= w100fb_resume,
	.driver		= {
		.name	= "w100fb",
	},
};

int __init w100fb_init(void)
{
	return platform_driver_register(&w100fb_driver);
}

void __exit w100fb_cleanup(void)
{
	platform_driver_unregister(&w100fb_driver);
}

module_init(w100fb_init);
module_exit(w100fb_cleanup);

MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver");
MODULE_LICENSE("GPL");
gpl-2.0
dhinesh77/android_kernel_samsung_corsica
net/9p/util.c
2943
3340
/* * net/9p/util.c * * This file contains some helper functions * * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net> * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/parser.h> #include <linux/idr.h> #include <linux/slab.h> #include <net/9p/9p.h> /** * struct p9_idpool - per-connection accounting for tag idpool * @lock: protects the pool * @pool: idr to allocate tag id from * */ struct p9_idpool { spinlock_t lock; struct idr pool; }; /** * p9_idpool_create - create a new per-connection id pool * */ struct p9_idpool *p9_idpool_create(void) { struct p9_idpool *p; p = kmalloc(sizeof(struct p9_idpool), GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); spin_lock_init(&p->lock); idr_init(&p->pool); return p; } EXPORT_SYMBOL(p9_idpool_create); /** * p9_idpool_destroy - create a new per-connection id pool * @p: idpool to destroy */ void p9_idpool_destroy(struct p9_idpool *p) { idr_destroy(&p->pool); kfree(p); } EXPORT_SYMBOL(p9_idpool_destroy); /** * p9_idpool_get - allocate numeric id from pool * @p: pool to allocate from * * Bugs: This seems to be an awful generic function, should it be in idr.c with * the lock included in struct idr? 
*/ int p9_idpool_get(struct p9_idpool *p) { int i = 0; int error; unsigned long flags; retry: if (idr_pre_get(&p->pool, GFP_NOFS) == 0) return -1; spin_lock_irqsave(&p->lock, flags); /* no need to store exactly p, we just need something non-null */ error = idr_get_new(&p->pool, p, &i); spin_unlock_irqrestore(&p->lock, flags); if (error == -EAGAIN) goto retry; else if (error) return -1; P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", i, p); return i; } EXPORT_SYMBOL(p9_idpool_get); /** * p9_idpool_put - release numeric id from pool * @id: numeric id which is being released * @p: pool to release id into * * Bugs: This seems to be an awful generic function, should it be in idr.c with * the lock included in struct idr? */ void p9_idpool_put(int id, struct p9_idpool *p) { unsigned long flags; P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", id, p); spin_lock_irqsave(&p->lock, flags); idr_remove(&p->pool, id); spin_unlock_irqrestore(&p->lock, flags); } EXPORT_SYMBOL(p9_idpool_put); /** * p9_idpool_check - check if the specified id is available * @id: id to check * @p: pool to check */ int p9_idpool_check(int id, struct p9_idpool *p) { return idr_find(&p->pool, id) != NULL; } EXPORT_SYMBOL(p9_idpool_check);
gpl-2.0
SatrioDwiPrabowo/Intuisy-3.4xx-Kernel-Nanhu
drivers/net/wireless/ath6kl/ath9k/ar9003_eeprom.c
3967
149412
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <asm/unaligned.h> #include "hw.h" #include "ar9003_phy.h" #include "ar9003_eeprom.h" #define COMP_HDR_LEN 4 #define COMP_CKSUM_LEN 2 #define LE16(x) __constant_cpu_to_le16(x) #define LE32(x) __constant_cpu_to_le32(x) /* Local defines to distinguish between extension and control CTL's */ #define EXT_ADDITIVE (0x8000) #define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) #define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) #define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) #define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ #define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */ #define PWRINCR_3_TO_1_CHAIN 9 /* 10*log(3)*2 */ #define PWRINCR_3_TO_2_CHAIN 3 /* floor(10*log(3/2)*2) */ #define PWRINCR_2_TO_1_CHAIN 6 /* 10*log(2)*2 */ #define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ #define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ #define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6)) #define EEPROM_DATA_LEN_9485 1088 static int ar9003_hw_power_interpolate(int32_t x, int32_t *px, int32_t *py, u_int16_t np); static const struct ar9300_eeprom ar9300_default = { .eepromVersion = 2, .templateVersion = 2, .macAddr = {0, 2, 3, 4, 5, 6}, .custData = {0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, .featureEnable = 0x0c, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastClock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 3, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0, .swreg = 0, }, .modalHeader2G = { /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x22222), /* * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) }, /* * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 36, .voltSlope = 0, /* * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur * channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, 
.txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2484, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11S */ { {36, 36, 36, 36} }, { {36, 36, 36, 36} }, }, .calTargetPower2G = { /* 6-24,36,48,54 */ { {32, 32, 28, 24} }, { {32, 32, 28, 24} }, { {32, 32, 28, 24} }, }, .calTargetPower2GHT20 = { { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, }, .calTargetPower2GHT40 = { { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) 
}, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0, }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), }, { /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), 
CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x22222), /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x000), LE16(0x000), LE16(0x000), }, /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 68, .voltSlope = 0, /* spurChans spur channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80c080), .papdRateMaskHt40 = LE32(0x0080c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 0, .tempSlopeHigh = 0, .xatten1DBLow = {0, 0, 0}, .xatten1MarginLow = {0, 0, 0}, .xatten1DBHigh = {0, 0, 0}, .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, 
{0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, { {20, 20, 20, 10} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, { {20, 20, 10, 
10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0), /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0), /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0), /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0), /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0), /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0), /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0), /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0), /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[3].ctlEdges[6].bChannel */ 0xFF, /* Data[3].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), 
/* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0), /* Data[4].ctlEdges[4].bChannel */ 0xFF, /* Data[4].ctlEdges[5].bChannel */ 0xFF, /* Data[4].ctlEdges[6].bChannel */ 0xFF, /* Data[4].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0), /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0), /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[5].ctlEdges[6].bChannel */ 0xFF, /* Data[5].ctlEdges[7].bChannel */ 0xFF }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0), /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0), /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0), /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0), /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0), /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0), /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0), /* Data[8].ctlEdges[7].bChannel 
*/ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct ar9300_eeprom ar9300_x113 = { .eepromVersion = 2, .templateVersion = 6, .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0}, .custData = {"x113-023-f0000"}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, .featureEnable = 0x0d, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastClock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 6, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0x21, .swreg = 0, }, .modalHeader2G = 
{ /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x44444), /* * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) }, /* * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 25, .voltSlope = 0, /* * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur * channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0}, /* * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80c080), .papdRateMaskHt40 = LE32(0x0080c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2472, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { 
FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11S */ { {34, 34, 34, 34} }, { {34, 34, 34, 34} }, }, .calTargetPower2G = { /* 6-24,36,48,54 */ { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, }, .calTargetPower2GHT20 = { { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} }, { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} }, { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} }, }, .calTargetPower2GHT40 = { { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0, }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[8].ctlEdges[2].bChannel 
*/ FREQ2FBIN(2472, 1), }, { /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x220), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x11111), /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150), }, /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 68, .voltSlope = 0, /* spurChans spur channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = 
{-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0xf, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 72, .tempSlopeHigh = 105, .xatten1DBLow = {0, 0, 0}, .xatten1MarginLow = {0, 0, 0}, .xatten1DBHigh = {0, 0, 0}, .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5785, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5785, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5190, 0), FREQ2FBIN(5230, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5410, 0), FREQ2FBIN(5510, 0), FREQ2FBIN(5670, 0), FREQ2FBIN(5755, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, { 
{42, 40, 40, 34} }, { {42, 40, 40, 34} }, { {42, 40, 40, 34} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} }, { {38, 38, 38, 38, 32, 28, 38, 38, 32, 28, 38, 38, 32, 26} }, { {36, 36, 36, 36, 32, 28, 36, 36, 32, 28, 36, 36, 32, 26} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} }, { {36, 36, 36, 36, 30, 26, 36, 36, 30, 26, 36, 36, 30, 24} }, { {34, 34, 34, 34, 30, 26, 34, 34, 30, 26, 34, 34, 30, 24} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0), /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[1].ctlEdges[3].bChannel */ 
FREQ2FBIN(5500, 0), /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0), /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0), /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0), /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0), /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0), /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0), /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0), /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[3].ctlEdges[6].bChannel */ 0xFF, /* Data[3].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0), /* Data[4].ctlEdges[4].bChannel */ 0xFF, /* Data[4].ctlEdges[5].bChannel */ 0xFF, /* Data[4].ctlEdges[6].bChannel */ 0xFF, /* Data[4].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0), /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0), /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[5].ctlEdges[6].bChannel */ 0xFF, /* Data[5].ctlEdges[7].bChannel */ 0xFF }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[6].ctlEdges[1].bChannel */ 
FREQ2FBIN(5200, 0), /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0), /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0), /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0), /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0), /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0), /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0), /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0), /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { 
{ CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct ar9300_eeprom ar9300_h112 = { .eepromVersion = 2, .templateVersion = 3, .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0}, .custData = {"h112-241-f0000"}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, .featureEnable = 0x0d, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastClock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 6, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0x10, .swreg = 0, }, .modalHeader2G = { /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x44444), /* * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) }, /* * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 25, .voltSlope = 0, /* * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur * channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0}, /* * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 
Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80c080), .papdRateMaskHt40 = LE32(0x0080c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2462, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2472, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11S */ { {34, 34, 34, 34} }, { {34, 34, 34, 34} }, }, .calTargetPower2G = { /* 6-24,36,48,54 */ { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, }, .calTargetPower2GHT20 = { { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} }, { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} }, { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} }, }, .calTargetPower2GHT40 = { { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 
0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0, }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), }, { /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { 
CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x220), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x44444), /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150), }, /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0, 0, 0}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0, 0, 0}, .tempSlope = 45, .voltSlope = 0, /* spurChans spur channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 40, .tempSlopeHigh = 50, .xatten1DBLow = {0, 0, 0}, .xatten1MarginLow = {0, 0, 0}, .xatten1DBHigh = {0, 0, 0}, .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 
0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5785, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} }, { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} }, { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} }, { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} }, { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} }, { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} }, { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} }, { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {28, 28, 28, 
26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} }, { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} }, { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} }, { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} }, { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} }, { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} }, { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} }, { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0), /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0), /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0), /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0), /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0), /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0), /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[3].ctlEdges[1].bChannel */ 
FREQ2FBIN(5200, 0), /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0), /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0), /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[3].ctlEdges[6].bChannel */ 0xFF, /* Data[3].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0), /* Data[4].ctlEdges[4].bChannel */ 0xFF, /* Data[4].ctlEdges[5].bChannel */ 0xFF, /* Data[4].ctlEdges[6].bChannel */ 0xFF, /* Data[4].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0), /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0), /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[5].ctlEdges[6].bChannel */ 0xFF, /* Data[5].ctlEdges[7].bChannel */ 0xFF }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0), /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0), /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0), /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0), /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0), /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[7].ctlEdges[7].bChannel */ 
FREQ2FBIN(5825, 0) }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0), /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0), /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct ar9300_eeprom ar9300_x112 = { .eepromVersion = 2, .templateVersion = 5, .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0}, .custData = {"x112-041-f0000"}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, 
.featureEnable = 0x0d, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastclock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 6, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0x0, .swreg = 0, }, .modalHeader2G = { /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x22222), /* * antCtrlChain[ar9300_max_chains]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) }, /* * xatten1DB[AR9300_max_chains]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0x1b, 0x1b, 0x1b}, /* * xatten1Margin[ar9300_max_chains]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0x15, 0x15, 0x15}, .tempSlope = 50, .voltSlope = 0, /* * spurChans[OSPrey_eeprom_modal_sPURS]; spur * channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0}, /* * noiseFloorThreshch[ar9300_max_cHAINS]; 3 Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80c080), .papdRateMaskHt40 = LE32(0x0080c080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ 
.calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2472, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11s */ { {38, 38, 38, 38} }, { {38, 38, 38, 38} }, }, .calTargetPower2G = { /* 6-24,36,48,54 */ { {38, 38, 36, 34} }, { {38, 38, 36, 34} }, { {38, 38, 34, 32} }, }, .calTargetPower2GHT20 = { { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} }, { {36, 36, 36, 36, 36, 34, 36, 34, 32, 30, 30, 30, 28, 26} }, { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} }, }, .calTargetPower2GHT40 = { { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} }, { {36, 36, 36, 36, 34, 32, 34, 32, 30, 28, 28, 28, 28, 24} }, { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) }, { /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(2472, 1), /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(2472, 
1), 0, }, { /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(2422, 1), /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(2427, 1), /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(2447, 1), /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(2472, 1), }, { /* Data[9].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[9].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[9].ctledges[2].bchannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctledges[0].bchannel */ FREQ2FBIN(2412, 1), /* Data[10].ctledges[1].bchannel */ FREQ2FBIN(2417, 1), /* Data[10].ctledges[2].bchannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctledges[0].bchannel */ FREQ2FBIN(2422, 1), /* Data[11].ctledges[1].bchannel */ FREQ2FBIN(2427, 1), /* Data[11].ctledges[2].bchannel */ FREQ2FBIN(2447, 1), /* Data[11].ctledges[3].bchannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x22222), /* antCtrlChain 6 
idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x0), LE16(0x0), LE16(0x0), }, /* xatten1DB 3 xatten1_db for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0x13, 0x19, 0x17}, /* * xatten1Margin[ar9300_max_chains]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0x19, 0x19, 0x19}, .tempSlope = 70, .voltSlope = 15, /* spurChans spur channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshch check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 72, .tempSlopeHigh = 105, .xatten1DBLow = {0x10, 0x14, 0x10}, .xatten1MarginLow = {0x19, 0x19 , 0x19}, .xatten1DBHigh = {0x1d, 0x20, 0x24}, .xatten1MarginHigh = {0x10, 0x10, 0x10} }, .calFreqPier5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5785, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), 
FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5725, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {32, 32, 28, 26} }, { {32, 32, 28, 26} }, { {32, 32, 28, 26} }, { {32, 32, 26, 24} }, { {32, 32, 26, 24} }, { {32, 32, 24, 22} }, { {30, 30, 24, 22} }, { {30, 30, 24, 22} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 22, 22, 20, 20} }, { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 20, 18, 16, 16} }, { {32, 32, 32, 32, 28, 26, 32, 24, 20, 16, 18, 16, 14, 14} }, { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} }, { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} }, { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 22, 22, 20, 20} }, { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 20, 18, 16, 16} }, { {32, 32, 32, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} }, { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} }, { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[0].ctledges[1].bchannel */ 
FREQ2FBIN(5260, 0), /* Data[0].ctledges[2].bchannel */ FREQ2FBIN(5280, 0), /* Data[0].ctledges[3].bchannel */ FREQ2FBIN(5500, 0), /* Data[0].ctledges[4].bchannel */ FREQ2FBIN(5600, 0), /* Data[0].ctledges[5].bchannel */ FREQ2FBIN(5700, 0), /* Data[0].ctledges[6].bchannel */ FREQ2FBIN(5745, 0), /* Data[0].ctledges[7].bchannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[1].ctledges[1].bchannel */ FREQ2FBIN(5260, 0), /* Data[1].ctledges[2].bchannel */ FREQ2FBIN(5280, 0), /* Data[1].ctledges[3].bchannel */ FREQ2FBIN(5500, 0), /* Data[1].ctledges[4].bchannel */ FREQ2FBIN(5520, 0), /* Data[1].ctledges[5].bchannel */ FREQ2FBIN(5700, 0), /* Data[1].ctledges[6].bchannel */ FREQ2FBIN(5745, 0), /* Data[1].ctledges[7].bchannel */ FREQ2FBIN(5825, 0) }, { /* Data[2].ctledges[0].bchannel */ FREQ2FBIN(5190, 0), /* Data[2].ctledges[1].bchannel */ FREQ2FBIN(5230, 0), /* Data[2].ctledges[2].bchannel */ FREQ2FBIN(5270, 0), /* Data[2].ctledges[3].bchannel */ FREQ2FBIN(5310, 0), /* Data[2].ctledges[4].bchannel */ FREQ2FBIN(5510, 0), /* Data[2].ctledges[5].bchannel */ FREQ2FBIN(5550, 0), /* Data[2].ctledges[6].bchannel */ FREQ2FBIN(5670, 0), /* Data[2].ctledges[7].bchannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[3].ctledges[1].bchannel */ FREQ2FBIN(5200, 0), /* Data[3].ctledges[2].bchannel */ FREQ2FBIN(5260, 0), /* Data[3].ctledges[3].bchannel */ FREQ2FBIN(5320, 0), /* Data[3].ctledges[4].bchannel */ FREQ2FBIN(5500, 0), /* Data[3].ctledges[5].bchannel */ FREQ2FBIN(5700, 0), /* Data[3].ctledges[6].bchannel */ 0xFF, /* Data[3].ctledges[7].bchannel */ 0xFF, }, { /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(5260, 0), /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(5500, 0), /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(5700, 0), /* Data[4].ctledges[4].bchannel */ 0xFF, /* Data[4].ctledges[5].bchannel */ 0xFF, /* Data[4].ctledges[6].bchannel */ 
0xFF, /* Data[4].ctledges[7].bchannel */ 0xFF, }, { /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(5190, 0), /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(5270, 0), /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(5310, 0), /* Data[5].ctledges[3].bchannel */ FREQ2FBIN(5510, 0), /* Data[5].ctledges[4].bchannel */ FREQ2FBIN(5590, 0), /* Data[5].ctledges[5].bchannel */ FREQ2FBIN(5670, 0), /* Data[5].ctledges[6].bchannel */ 0xFF, /* Data[5].ctledges[7].bchannel */ 0xFF }, { /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(5200, 0), /* Data[6].ctledges[2].bchannel */ FREQ2FBIN(5220, 0), /* Data[6].ctledges[3].bchannel */ FREQ2FBIN(5260, 0), /* Data[6].ctledges[4].bchannel */ FREQ2FBIN(5500, 0), /* Data[6].ctledges[5].bchannel */ FREQ2FBIN(5600, 0), /* Data[6].ctledges[6].bchannel */ FREQ2FBIN(5700, 0), /* Data[6].ctledges[7].bchannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(5180, 0), /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(5260, 0), /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(5320, 0), /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(5500, 0), /* Data[7].ctledges[4].bchannel */ FREQ2FBIN(5560, 0), /* Data[7].ctledges[5].bchannel */ FREQ2FBIN(5700, 0), /* Data[7].ctledges[6].bchannel */ FREQ2FBIN(5745, 0), /* Data[7].ctledges[7].bchannel */ FREQ2FBIN(5825, 0) }, { /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(5190, 0), /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(5230, 0), /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(5270, 0), /* Data[8].ctledges[3].bchannel */ FREQ2FBIN(5510, 0), /* Data[8].ctledges[4].bchannel */ FREQ2FBIN(5550, 0), /* Data[8].ctledges[5].bchannel */ FREQ2FBIN(5670, 0), /* Data[8].ctledges[6].bchannel */ FREQ2FBIN(5755, 0), /* Data[8].ctledges[7].bchannel */ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 
1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct ar9300_eeprom ar9300_h116 = { .eepromVersion = 2, .templateVersion = 4, .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0}, .custData = {"h116-041-f0000"}, .baseEepHeader = { .regDmn = { LE16(0), LE16(0x1f) }, .txrxMask = 0x33, /* 4 bits tx and 4 bits rx */ .opCapFlags = { .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A, .eepMisc = 0, }, .rfSilent = 0, .blueToothOptions = 0, .deviceCap = 0, .deviceType = 5, /* takes lower byte in eeprom location */ .pwrTableOffset = AR9300_PWR_TABLE_OFFSET, .params_for_tuning_caps = {0, 0}, .featureEnable = 0x0d, /* * bit0 - enable tx temp comp - disabled * bit1 - enable tx volt comp - disabled * bit2 - enable fastClock - enabled * bit3 - enable doubling - enabled * bit4 - enable internal regulator - disabled * bit5 - enable pa predistortion - disabled */ .miscConfiguration = 0, /* bit0 - turn down drivestrength */ .eepromWriteEnableGpio = 6, .wlanDisableGpio = 0, .wlanLedGpio = 8, .rxBandSelectGpio = 0xff, .txrxgain = 0x10, .swreg = 0, }, .modalHeader2G = { /* ar9300_modal_eep_header 2g */ /* 4 idle,t1,t2,b(4 bits per setting) */ .antCtrlCommon = LE32(0x110), /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */ .antCtrlCommon2 = LE32(0x44444), /* * 
antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r, * rx1, rx12, b (2 bits each) */ .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) }, /* * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db * for ar9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0x1f, 0x1f, 0x1f}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for ar9280 (0xa20c/b20c 16:12 */ .xatten1Margin = {0x12, 0x12, 0x12}, .tempSlope = 25, .voltSlope = 0, /* * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur * channels in usual fbin coding format */ .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0}, /* * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check * if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2c, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0c80C080), .papdRateMaskHt40 = LE32(0x0080C080), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext1 = { .ant_div_control = 0, .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, .calFreqPier2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2462, 1), }, /* ar9300_cal_data_per_freq_op_loop 2g */ .calPierData2G = { { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} }, }, .calTarget_freqbin_Cck = { FREQ2FBIN(2412, 1), FREQ2FBIN(2472, 1), }, .calTarget_freqbin_2G = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT20 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTarget_freqbin_2GHT40 = { FREQ2FBIN(2412, 1), FREQ2FBIN(2437, 1), FREQ2FBIN(2472, 1) }, .calTargetPowerCck = { /* 1L-5L,5S,11L,11S */ { {34, 34, 34, 34} }, { {34, 34, 34, 34} }, }, .calTargetPower2G = 
{ /* 6-24,36,48,54 */ { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, { {34, 34, 32, 32} }, }, .calTargetPower2GHT20 = { { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} }, { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} }, { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} }, }, .calTargetPower2GHT40 = { { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} }, }, .ctlIndex_2G = { 0x11, 0x12, 0x15, 0x17, 0x41, 0x42, 0x45, 0x47, 0x31, 0x32, 0x35, 0x37, }, .ctl_freqbin_2G = { { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2457, 1), FREQ2FBIN(2462, 1) }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2412, 1), FREQ2FBIN(2417, 1), FREQ2FBIN(2462, 1), 0xFF, }, { FREQ2FBIN(2422, 1), FREQ2FBIN(2427, 1), FREQ2FBIN(2447, 1), FREQ2FBIN(2452, 1) }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1), }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0, }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), FREQ2FBIN(2472, 1), 0, }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), }, { /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[9].ctlEdges[2].bChannel */ 
FREQ2FBIN(2472, 1), 0 }, { /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1), /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1), /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1), 0 }, { /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1), /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1), /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1), /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1), } }, .ctlPowerData_2G = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ .antCtrlCommon = LE32(0x220), /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */ .antCtrlCommon2 = LE32(0x44444), /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */ .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150), }, /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */ .xatten1DB = {0x19, 0x19, 0x19}, /* * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin * for merlin (0xa20c/b20c 16:12 */ .xatten1Margin = {0x14, 0x14, 0x14}, .tempSlope = 70, .voltSlope = 0, /* spurChans spur channels in usual fbin coding format */ .spurChans = {0, 0, 0, 0, 0}, /* noiseFloorThreshCh Check if the register is per chain */ .noiseFloorThreshCh = {-1, 0, 0}, .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, .quick_drop = 0, .xpaBiasLvl = 0, .txFrameToDataStart = 0x0e, .txFrameToPaOn = 0x0e, .txClip = 3, /* 4 bits 
tx_clip, 4 bits dac_scale_cck */ .antennaGain = 0, .switchSettling = 0x2d, .adcDesiredSize = -30, .txEndToXpaOff = 0, .txEndToRxOn = 0x2, .txFrameToXpaOn = 0xe, .thresh62 = 28, .papdRateMaskHt20 = LE32(0x0cf0e0e0), .papdRateMaskHt40 = LE32(0x6cf0e0e0), .futureModal = { 0, 0, 0, 0, 0, 0, 0, 0, }, }, .base_ext2 = { .tempSlopeLow = 35, .tempSlopeHigh = 50, .xatten1DBLow = {0, 0, 0}, .xatten1MarginLow = {0, 0, 0}, .xatten1DBHigh = {0, 0, 0}, .xatten1MarginHigh = {0, 0, 0} }, .calFreqPier5G = { FREQ2FBIN(5160, 0), FREQ2FBIN(5220, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5785, 0) }, .calPierData5G = { { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, { {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, {0, 0, 0, 0, 0}, }, }, .calTarget_freqbin_5G = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5600, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT20 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTarget_freqbin_5GHT40 = { FREQ2FBIN(5180, 0), FREQ2FBIN(5240, 0), FREQ2FBIN(5320, 0), FREQ2FBIN(5400, 0), FREQ2FBIN(5500, 0), FREQ2FBIN(5700, 0), FREQ2FBIN(5745, 0), FREQ2FBIN(5825, 0) }, .calTargetPower5G = { /* 6-24,36,48,54 */ { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, { {30, 30, 28, 24} }, }, .calTargetPower5GHT20 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {30, 30, 
30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} }, { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} }, { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} }, { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} }, { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} }, { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} }, { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} }, { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} }, }, .calTargetPower5GHT40 = { /* * 0_8_16,1-3_9-11_17-19, * 4,5,6,7,12,13,14,15,20,21,22,23 */ { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} }, { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} }, { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} }, { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} }, { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} }, { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} }, { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} }, { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} }, }, .ctlIndex_5G = { 0x10, 0x16, 0x18, 0x40, 0x46, 0x48, 0x30, 0x36, 0x38 }, .ctl_freqbin_5G = { { /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0), /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0), /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0), /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* 
Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0), /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0), /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0), /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0), /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0) }, { /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0), /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0), /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[3].ctlEdges[6].bChannel */ 0xFF, /* Data[3].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0), /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0), /* Data[4].ctlEdges[4].bChannel */ 0xFF, /* Data[4].ctlEdges[5].bChannel */ 0xFF, /* Data[4].ctlEdges[6].bChannel */ 0xFF, /* Data[4].ctlEdges[7].bChannel */ 0xFF, }, { /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0), /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0), /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0), /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[5].ctlEdges[6].bChannel */ 0xFF, /* Data[5].ctlEdges[7].bChannel */ 0xFF }, { /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0), /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0), /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0), /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0), /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0), /* 
Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0), /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0) }, { /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0), /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0), /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0), /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0), /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0), /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0), /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0), /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0) }, { /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0), /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0), /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0), /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0), /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0), /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0), /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0), /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0) } }, .ctlPowerData_5G = { { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } }; static const struct 
ar9300_eeprom *ar9300_eep_templates[] = { &ar9300_default, &ar9300_x112, &ar9300_h116, &ar9300_h112, &ar9300_x113, }; static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id) { #define N_LOOP (sizeof(ar9300_eep_templates) / sizeof(ar9300_eep_templates[0])) int it; for (it = 0; it < N_LOOP; it++) if (ar9300_eep_templates[it]->templateVersion == id) return ar9300_eep_templates[it]; return NULL; #undef N_LOOP } static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) { if (fbin == AR5416_BCHAN_UNUSED) return fbin; return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); } static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah) { return 0; } static int interpolate(int x, int xa, int xb, int ya, int yb) { int bf, factor, plus; bf = 2 * (yb - ya) * (x - xa) / (xb - xa); factor = bf / 2; plus = bf % 2; return ya + factor + plus; } static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader; switch (param) { case EEP_MAC_LSW: return get_unaligned_be16(eep->macAddr); case EEP_MAC_MID: return get_unaligned_be16(eep->macAddr + 2); case EEP_MAC_MSW: return get_unaligned_be16(eep->macAddr + 4); case EEP_REG_0: return le16_to_cpu(pBase->regDmn[0]); case EEP_OP_CAP: return pBase->deviceCap; case EEP_OP_MODE: return pBase->opCapFlags.opFlags; case EEP_RF_SILENT: return pBase->rfSilent; case EEP_TX_MASK: return (pBase->txrxMask >> 4) & 0xf; case EEP_RX_MASK: return pBase->txrxMask & 0xf; case EEP_DRIVE_STRENGTH: #define AR9300_EEP_BASE_DRIV_STRENGTH 0x1 return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH; case EEP_INTERNAL_REGULATOR: /* Bit 4 is internal regulator flag */ return (pBase->featureEnable & 0x10) >> 4; case EEP_SWREG: return le32_to_cpu(pBase->swreg); case EEP_PAPRD: return !!(pBase->featureEnable & BIT(5)); case EEP_CHAIN_MASK_REDUCE: return (pBase->miscConfiguration >> 0x3) & 0x1; case EEP_ANT_DIV_CTL1: 
return eep->base_ext1.ant_div_control; case EEP_ANTENNA_GAIN_5G: return eep->modalHeader5G.antennaGain; case EEP_ANTENNA_GAIN_2G: return eep->modalHeader2G.antennaGain; case EEP_QUICK_DROP: return pBase->miscConfiguration & BIT(1); default: return 0; } } static bool ar9300_eeprom_read_byte(struct ath_common *common, int address, u8 *buffer) { u16 val; if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val))) return false; *buffer = (val >> (8 * (address % 2))) & 0xff; return true; } static bool ar9300_eeprom_read_word(struct ath_common *common, int address, u8 *buffer) { u16 val; if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val))) return false; buffer[0] = val >> 8; buffer[1] = val & 0xff; return true; } static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer, int count) { struct ath_common *common = ath9k_hw_common(ah); int i; if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) { ath_dbg(common, EEPROM, "eeprom address not in range\n"); return false; } /* * Since we're reading the bytes in reverse order from a little-endian * word stream, an even address means we only use the lower half of * the 16-bit word at that address */ if (address % 2 == 0) { if (!ar9300_eeprom_read_byte(common, address--, buffer++)) goto error; count--; } for (i = 0; i < count / 2; i++) { if (!ar9300_eeprom_read_word(common, address, buffer)) goto error; address -= 2; buffer += 2; } if (count % 2) if (!ar9300_eeprom_read_byte(common, address, buffer)) goto error; return true; error: ath_dbg(common, EEPROM, "unable to read eeprom region at offset %d\n", address); return false; } static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data) { REG_READ(ah, AR9300_OTP_BASE + (4 * addr)); if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE, AR9300_OTP_STATUS_VALID, 1000)) return false; *data = REG_READ(ah, AR9300_OTP_READ_DATA); return true; } static bool ar9300_read_otp(struct ath_hw *ah, int address, u8 *buffer, int 
count) { u32 data; int i; for (i = 0; i < count; i++) { int offset = 8 * ((address - i) % 4); if (!ar9300_otp_read_word(ah, (address - i) / 4, &data)) return false; buffer[i] = (data >> offset) & 0xff; } return true; } static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference, int *length, int *major, int *minor) { unsigned long value[4]; value[0] = best[0]; value[1] = best[1]; value[2] = best[2]; value[3] = best[3]; *code = ((value[0] >> 5) & 0x0007); *reference = (value[0] & 0x001f) | ((value[1] >> 2) & 0x0020); *length = ((value[1] << 4) & 0x07f0) | ((value[2] >> 4) & 0x000f); *major = (value[2] & 0x000f); *minor = (value[3] & 0x00ff); } static u16 ar9300_comp_cksum(u8 *data, int dsize) { int it, checksum = 0; for (it = 0; it < dsize; it++) { checksum += data[it]; checksum &= 0xffff; } return checksum; } static bool ar9300_uncompress_block(struct ath_hw *ah, u8 *mptr, int mdataSize, u8 *block, int size) { int it; int spot; int offset; int length; struct ath_common *common = ath9k_hw_common(ah); spot = 0; for (it = 0; it < size; it += (length+2)) { offset = block[it]; offset &= 0xff; spot += offset; length = block[it+1]; length &= 0xff; if (length > 0 && spot >= 0 && spot+length <= mdataSize) { ath_dbg(common, EEPROM, "Restore at %d: spot=%d offset=%d length=%d\n", it, spot, offset, length); memcpy(&mptr[spot], &block[it+2], length); spot += length; } else if (length > 0) { ath_dbg(common, EEPROM, "Bad restore at %d: spot=%d offset=%d length=%d\n", it, spot, offset, length); return false; } } return true; } static int ar9300_compress_decision(struct ath_hw *ah, int it, int code, int reference, u8 *mptr, u8 *word, int length, int mdata_size) { struct ath_common *common = ath9k_hw_common(ah); const struct ar9300_eeprom *eep = NULL; switch (code) { case _CompressNone: if (length != mdata_size) { ath_dbg(common, EEPROM, "EEPROM structure size mismatch memory=%d eeprom=%d\n", mdata_size, length); return -1; } memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), 
length); ath_dbg(common, EEPROM, "restored eeprom %d: uncompressed, length %d\n", it, length); break; case _CompressBlock: if (reference == 0) { } else { eep = ar9003_eeprom_struct_find_by_id(reference); if (eep == NULL) { ath_dbg(common, EEPROM, "can't find reference eeprom struct %d\n", reference); return -1; } memcpy(mptr, eep, mdata_size); } ath_dbg(common, EEPROM, "restore eeprom %d: block, reference %d, length %d\n", it, reference, length); ar9300_uncompress_block(ah, mptr, mdata_size, (u8 *) (word + COMP_HDR_LEN), length); break; default: ath_dbg(common, EEPROM, "unknown compression code %d\n", code); return -1; } return 0; } typedef bool (*eeprom_read_op)(struct ath_hw *ah, int address, u8 *buffer, int count); static bool ar9300_check_header(void *data) { u32 *word = data; return !(*word == 0 || *word == ~0); } static bool ar9300_check_eeprom_header(struct ath_hw *ah, eeprom_read_op read, int base_addr) { u8 header[4]; if (!read(ah, base_addr, header, 4)) return false; return ar9300_check_header(header); } static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr, int mdata_size) { struct ath_common *common = ath9k_hw_common(ah); u16 *data = (u16 *) mptr; int i; for (i = 0; i < mdata_size / 2; i++, data++) ath9k_hw_nvram_read(common, i, data); return 0; } /* * Read the configuration data from the eeprom. * The data can be put in any specified memory buffer. * * Returns -1 on error. * Returns address of next memory location on success. 
 */
static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
					  u8 *mptr, int mdata_size)
{
#define MDEFAULT 15
#define MSTATE 100
	int cptr;
	u8 *word;
	int code;
	int reference, length, major, minor;
	int osize;
	int it;
	u16 checksum, mchecksum;
	struct ath_common *common = ath9k_hw_common(ah);
	eeprom_read_op read;

	/* Flash-based boards bypass the compressed-block format entirely. */
	if (ath9k_hw_use_flash(ah))
		return ar9300_eeprom_restore_flash(ah, mptr, mdata_size);

	word = kzalloc(2048, GFP_KERNEL);
	if (!word)
		return -ENOMEM;

	/* Start from the built-in defaults, then patch blocks on top. */
	memcpy(mptr, &ar9300_default, mdata_size);

	/* Probe EEPROM first, at the chip-specific base address. */
	read = ar9300_read_eeprom;
	if (AR_SREV_9485(ah))
		cptr = AR9300_BASE_ADDR_4K;
	else if (AR_SREV_9330(ah))
		cptr = AR9300_BASE_ADDR_512;
	else
		cptr = AR9300_BASE_ADDR;
	ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
		cptr);
	if (ar9300_check_eeprom_header(ah, read, cptr))
		goto found;

	cptr = AR9300_BASE_ADDR_512;
	ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
		cptr);
	if (ar9300_check_eeprom_header(ah, read, cptr))
		goto found;

	/* No EEPROM header found - fall back to OTP. */
	read = ar9300_read_otp;
	cptr = AR9300_BASE_ADDR;
	ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n",
		cptr);
	if (ar9300_check_eeprom_header(ah, read, cptr))
		goto found;

	cptr = AR9300_BASE_ADDR_512;
	ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n",
		cptr);
	if (ar9300_check_eeprom_header(ah, read, cptr))
		goto found;

	goto fail;
found:
	ath_dbg(common, EEPROM, "Found valid EEPROM data\n");

	/* Walk the block chain downwards from cptr (bounded by MSTATE). */
	for (it = 0; it < MSTATE; it++) {
		if (!read(ah, cptr, word, COMP_HDR_LEN))
			goto fail;

		/* An all-0/all-1 header terminates the chain. */
		if (!ar9300_check_header(word))
			break;

		ar9300_comp_hdr_unpack(word, &code, &reference,
				       &length, &major, &minor);
		ath_dbg(common, EEPROM,
			"Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
			cptr, code, reference, length, major, minor);

		/* Reject implausible lengths, but keep scanning backwards. */
		if ((!AR_SREV_9485(ah) && length >= 1024) ||
		    (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
			ath_dbg(common, EEPROM, "Skipping bad header\n");
			cptr -= COMP_HDR_LEN;
			continue;
		}

		osize = length;
		read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
		checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
		mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
		ath_dbg(common, EEPROM, "checksum %x %x\n",
			checksum, mchecksum);
		if (checksum == mchecksum) {
			ar9300_compress_decision(ah, it, code, reference, mptr,
						 word, length, mdata_size);
		} else {
			ath_dbg(common, EEPROM,
				"skipping block with bad checksum\n");
		}
		cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
	}

	kfree(word);
	return cptr;

fail:
	kfree(word);
	return -1;
}

/*
 * Restore the configuration structure by reading the eeprom.
 * This function destroys any existing in-memory structure
 * content.
 */
static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
{
	u8 *mptr = (u8 *) &ah->eeprom.ar9300_eep;

	if (ar9300_eeprom_restore_internal(ah, mptr,
			sizeof(struct ar9300_eeprom)) < 0)
		return false;

	return true;
}

#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
/*
 * Append a textual dump of one modal header to buf.
 * NOTE(review): PR_EEP presumably appends via the buf/len/size names
 * captured from this scope - confirm against the macro definition,
 * which is outside this chunk.
 */
static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
				    struct ar9300_modal_eep_header *modal_hdr)
{
	PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
	PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
	PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2]));
	PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
	PR_EEP("Ant. Common Control2", le32_to_cpu(modal_hdr->antCtrlCommon2));
	PR_EEP("Ant. Gain", modal_hdr->antennaGain);
	PR_EEP("Switch Settle", modal_hdr->switchSettling);
	PR_EEP("Chain0 xatten1DB", modal_hdr->xatten1DB[0]);
	PR_EEP("Chain1 xatten1DB", modal_hdr->xatten1DB[1]);
	PR_EEP("Chain2 xatten1DB", modal_hdr->xatten1DB[2]);
	PR_EEP("Chain0 xatten1Margin", modal_hdr->xatten1Margin[0]);
	PR_EEP("Chain1 xatten1Margin", modal_hdr->xatten1Margin[1]);
	PR_EEP("Chain2 xatten1Margin", modal_hdr->xatten1Margin[2]);
	PR_EEP("Temp Slope", modal_hdr->tempSlope);
	PR_EEP("Volt Slope", modal_hdr->voltSlope);
	PR_EEP("spur Channels0", modal_hdr->spurChans[0]);
	PR_EEP("spur Channels1", modal_hdr->spurChans[1]);
	PR_EEP("spur Channels2", modal_hdr->spurChans[2]);
	PR_EEP("spur Channels3", modal_hdr->spurChans[3]);
	PR_EEP("spur Channels4", modal_hdr->spurChans[4]);
	PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
	PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
	PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
	PR_EEP("Quick Drop", modal_hdr->quick_drop);
	PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
	PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
	PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
	PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
	PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
	PR_EEP("txClip", modal_hdr->txClip);
	PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);

	return len;
}

/* Dump either the base header or both modal headers as text into buf. */
static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
				       u8 *buf, u32 len, u32 size)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct ar9300_base_eep_hdr *pBase;

	if (!dump_base_hdr) {
		len += snprintf(buf + len, size - len,
				"%20s :\n", "2GHz modal Header");
		len += ar9003_dump_modal_eeprom(buf, len, size,
						&eep->modalHeader2G);
		len += snprintf(buf + len, size - len,
				"%20s :\n", "5GHz modal Header");
		len += ar9003_dump_modal_eeprom(buf, len, size,
						&eep->modalHeader5G);
		goto out;
	}

	pBase = &eep->baseEepHeader;

	PR_EEP("EEPROM Version", ah->eeprom.ar9300_eep.eepromVersion);
	/* Continuation of the base-header dump started on the previous line. */
	PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
	PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
	PR_EEP("TX Mask", (pBase->txrxMask >> 4));
	PR_EEP("RX Mask", (pBase->txrxMask & 0x0f));
	PR_EEP("Allow 5GHz",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_11A));
	PR_EEP("Allow 2GHz",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_11G));
	PR_EEP("Disable 2GHz HT20",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_2G_HT20));
	PR_EEP("Disable 2GHz HT40",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_2G_HT40));
	PR_EEP("Disable 5Ghz HT20",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_5G_HT20));
	PR_EEP("Disable 5Ghz HT40",
	       !!(pBase->opCapFlags.opFlags & AR5416_OPFLAGS_N_5G_HT40));
	PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & 0x01));
	PR_EEP("RF Silent", pBase->rfSilent);
	PR_EEP("BT option", pBase->blueToothOptions);
	PR_EEP("Device Cap", pBase->deviceCap);
	PR_EEP("Device Type", pBase->deviceType);
	PR_EEP("Power Table Offset", pBase->pwrTableOffset);
	PR_EEP("Tuning Caps1", pBase->params_for_tuning_caps[0]);
	PR_EEP("Tuning Caps2", pBase->params_for_tuning_caps[1]);
	PR_EEP("Enable Tx Temp Comp", !!(pBase->featureEnable & BIT(0)));
	PR_EEP("Enable Tx Volt Comp", !!(pBase->featureEnable & BIT(1)));
	PR_EEP("Enable fast clock", !!(pBase->featureEnable & BIT(2)));
	PR_EEP("Enable doubling", !!(pBase->featureEnable & BIT(3)));
	PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
	PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
	PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
	PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1)));
	PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
	PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
	PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
	PR_EEP("WLAN LED Gpio", pBase->wlanLedGpio);
	PR_EEP("Rx Band Select Gpio", pBase->rxBandSelectGpio);
	PR_EEP("Tx Gain", pBase->txrxgain >> 4);
	PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
	PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));

	len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
			ah->eeprom.ar9300_eep.macAddr);
out:
	/* Clamp in case the dump overran the caller's buffer accounting. */
	if (len > size)
		len = size;

	return len;
}
#else
/* Debugfs disabled: dumping is a no-op. */
static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
				       u8 *buf, u32 len, u32 size)
{
	return 0;
}
#endif

/* XXX: review hardware docs */
static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah)
{
	return ah->eeprom.ar9300_eep.eepromVersion;
}

/* XXX: could be read from the eepromVersion, not sure yet */
static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
{
	return 0;
}

/* Return the calibrated xPA bias level for the requested band. */
static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;

	if (is2ghz)
		return eep->modalHeader2G.xpaBiasLvl;
	else
		return eep->modalHeader5G.xpaBiasLvl;
}

/* Program the eeprom xPA bias level into the chip-specific registers. */
static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
{
	int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);

	if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
		REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
	else if (AR_SREV_9462(ah))
		REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
	else {
		/* Older parts split the bias across TOP and THERM. */
		REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
		REG_RMW_FIELD(ah, AR_CH0_THERM,
			      AR_CH0_THERM_XPABIASLVL_MSB,
			      bias >> 2);
		REG_RMW_FIELD(ah, AR_CH0_THERM,
			      AR_CH0_THERM_XPASHORT2GND, 1);
	}
}

/* Per-band SPDT switch table value from the eeprom (CPU byte order). */
static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	__le16 val;

	if (is_2ghz)
		val = eep->modalHeader2G.switchcomspdt;
	else
		val = eep->modalHeader5G.switchcomspdt;

	return le16_to_cpu(val);
}

/* Per-band common antenna-control word from the eeprom. */
static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	__le32 val;

	if (is2ghz)
		val = eep->modalHeader2G.antCtrlCommon;
	else
		val = eep->modalHeader5G.antCtrlCommon;

	return le32_to_cpu(val);
}

/* Per-band second common antenna-control word (continues next line). */
static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
{
	struct ar9300_eeprom *eep =
		/* Continuation of ar9003_hw_ant_ctrl_common_2_get(). */
		&ah->eeprom.ar9300_eep;
	__le32 val;

	if (is2ghz)
		val = eep->modalHeader2G.antCtrlCommon2;
	else
		val = eep->modalHeader5G.antCtrlCommon2;

	return le32_to_cpu(val);
}

/* Per-chain antenna-control word; 0 for an out-of-range chain index. */
static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
					bool is2ghz)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	__le16 val = 0;

	if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
		if (is2ghz)
			val = eep->modalHeader2G.antCtrlChain[chain];
		else
			val = eep->modalHeader5G.antCtrlChain[chain];
	}

	return le16_to_cpu(val);
}

/*
 * Program the eeprom antenna-control settings (common, per-chain, and
 * on AR9330/AR9485 the antenna-diversity bits) into the PHY.
 */
static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
{
	int chain;
	u32 regval;
	u32 ant_div_ctl1;
	static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
			AR_PHY_SWITCH_CHAIN_0,
			AR_PHY_SWITCH_CHAIN_1,
			AR_PHY_SWITCH_CHAIN_2,
	};

	u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);

	if (AR_SREV_9462(ah)) {
		REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
				AR_SWITCH_TABLE_COM_AR9462_ALL, value);
	} else
		REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
			      AR_SWITCH_TABLE_COM_ALL, value);

	/*
	 * AR9462 defines new switch table for BT/WLAN,
	 * here's new field name in XXX.ref for both 2G and 5G.
	 * Register: [GLB_CONTROL] GLB_CONTROL (@0x20044)
	 * 15:12   R/W     SWITCH_TABLE_COM_SPDT_WLAN_RX
	 * SWITCH_TABLE_COM_SPDT_WLAN_RX
	 *
	 * 11:8    R/W     SWITCH_TABLE_COM_SPDT_WLAN_TX
	 * SWITCH_TABLE_COM_SPDT_WLAN_TX
	 *
	 * 7:4     R/W     SWITCH_TABLE_COM_SPDT_WLAN_IDLE
	 * SWITCH_TABLE_COM_SPDT_WLAN_IDLE
	 */
	if (AR_SREV_9462_20_OR_LATER(ah)) {
		value = ar9003_switch_com_spdt_get(ah, is2ghz);
		REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
				AR_SWITCH_TABLE_COM_SPDT_ALL, value);
	}

	value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
	REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);

	/* Only program chains present in the rx or tx chainmask. */
	for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
		if ((ah->rxchainmask & BIT(chain)) ||
		    (ah->txchainmask & BIT(chain))) {
			value = ar9003_hw_ant_ctrl_chain_get(ah, chain,
							     is2ghz);
			REG_RMW_FIELD(ah, switch_chain_reg[chain],
				      AR_SWITCH_TABLE_ALL, value);
		}
	}

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
		value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
		/*
		 * main_lnaconf, alt_lnaconf, main_tb, alt_tb
		 * are the fields present
		 */
		regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
		regval &= (~AR_ANT_DIV_CTRL_ALL);
		regval |= (value & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
		/* enable_lnadiv */
		regval &= (~AR_PHY_9485_ANT_DIV_LNADIV);
		regval |= ((value >> 6) & 0x1) <<
				AR_PHY_9485_ANT_DIV_LNADIV_S;
		REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);

		/*enable fast_div */
		regval = REG_READ(ah, AR_PHY_CCK_DETECT);
		regval &= (~AR_FAST_DIV_ENABLE);
		regval |= ((value >> 7) & 0x1) <<
				AR_FAST_DIV_ENABLE_S;
		REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
		ant_div_ctl1 =
			ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
		/* check whether antenna diversity is enabled */
		if ((ant_div_ctl1 >> 0x6) == 0x3) {
			regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
			/*
			 * clear bits 25-30 main_lnaconf, alt_lnaconf,
			 * main_tb, alt_tb
			 */
			regval &= (~(AR_PHY_9485_ANT_DIV_MAIN_LNACONF |
				     AR_PHY_9485_ANT_DIV_ALT_LNACONF |
				     AR_PHY_9485_ANT_DIV_ALT_GAINTB |
				     AR_PHY_9485_ANT_DIV_MAIN_GAINTB));
			/* by default use LNA1 for the main antenna */
			regval |= (AR_PHY_9485_ANT_DIV_LNA1 <<
				   AR_PHY_9485_ANT_DIV_MAIN_LNACONF_S);
			regval |= (AR_PHY_9485_ANT_DIV_LNA2 <<
				   AR_PHY_9485_ANT_DIV_ALT_LNACONF_S);
			REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
		}
	}
}

/*
 * Program the eeprom drive-strength setting into the CH0 bias
 * registers; the magic shifts select individual bias fields.
 */
static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
{
	int drive_strength;
	unsigned long reg;

	drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH);

	if (!drive_strength)
		return;

	reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS1);
	reg &= ~0x00ffffc0;
	reg |= 0x5 << 21;
	reg |= 0x5 << 18;
	reg |= 0x5 << 15;
	reg |= 0x5 << 12;
	reg |= 0x5 << 9;
	reg |= 0x5 << 6;
	REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS1, reg);

	reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS2);
	reg &= ~0xffffffe0;
	reg |= 0x5 << 29;
	reg |= 0x5 << 26;
	reg |= 0x5 << 23;
	reg |= 0x5 << 20;
	reg |= 0x5 << 17;
	reg |= 0x5 << 14;
	reg |= 0x5 << 11;
	reg |= 0x5 << 8;
	reg |= 0x5 << 5;
	REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS2, reg);

	reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS4);
	reg &= ~0xff800000;
	reg |= 0x5 << 29;
	reg |= 0x5 << 26;
	reg |= 0x5 << 23;
	REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
}

/*
 * xatten1DB for a chain: direct value for 2GHz; for 5GHz, interpolate
 * between the low/mid/high calibration points when low is non-zero.
 */
static u16 ar9003_hw_atten_chain_get(struct ath_hw *ah, int chain,
				     struct ath9k_channel *chan)
{
	int f[3], t[3];
	u16 value;
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;

	if (chain >= 0 && chain < 3) {
		if (IS_CHAN_2GHZ(chan))
			return eep->modalHeader2G.xatten1DB[chain];
		else if (eep->base_ext2.xatten1DBLow[chain] != 0) {
			t[0] = eep->base_ext2.xatten1DBLow[chain];
			f[0] = 5180;
			t[1] = eep->modalHeader5G.xatten1DB[chain];
			f[1] = 5500;
			t[2] = eep->base_ext2.xatten1DBHigh[chain];
			f[2] = 5785;
			value = ar9003_hw_power_interpolate((s32) chan->channel,
							    f, t, 3);
			return value;
		} else
			return eep->modalHeader5G.xatten1DB[chain];
	}

	return 0;
}

/*
 * xatten1Margin for a chain, using the same direct/interpolated scheme
 * as ar9003_hw_atten_chain_get() (continues on the next chunk line).
 */
static u16 ar9003_hw_atten_chain_get_margin(struct ath_hw *ah, int chain,
					    struct ath9k_channel *chan)
{
	int f[3], t[3];
	u16 value;
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;

	if (chain >= 0 && chain < 3) {
		if (IS_CHAN_2GHZ(chan))
			return eep->modalHeader2G.xatten1Margin[chain];
		else if (eep->base_ext2.xatten1MarginLow[chain] != 0) {
			t[0] =
			/* Continuation of the 5GHz margin interpolation. */
			     eep->base_ext2.xatten1MarginLow[chain];
			f[0] = 5180;
			t[1] = eep->modalHeader5G.xatten1Margin[chain];
			f[1] = 5500;
			t[2] = eep->base_ext2.xatten1MarginHigh[chain];
			f[2] = 5785;
			value = ar9003_hw_power_interpolate((s32) chan->channel,
							    f, t, 3);
			return value;
		} else
			return eep->modalHeader5G.xatten1Margin[chain];
	}

	return 0;
}

/* Program per-chain xatten1 DB and margin for the active tx chains. */
static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
{
	int i;
	u16 value;
	unsigned long ext_atten_reg[3] = {AR_PHY_EXT_ATTEN_CTL_0,
					  AR_PHY_EXT_ATTEN_CTL_1,
					  AR_PHY_EXT_ATTEN_CTL_2,
					 };

	/* Test value. if 0 then attenuation is unused. Don't load anything. */
	for (i = 0; i < 3; i++) {
		if (ah->txchainmask & BIT(i)) {
			value = ar9003_hw_atten_chain_get(ah, i, chan);
			REG_RMW_FIELD(ah, ext_atten_reg[i],
				      AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);

			value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
			REG_RMW_FIELD(ah, ext_atten_reg[i],
				      AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
				      value);
		}
	}
}

/*
 * Write pmu_set to pmu_reg until the register reads it back, up to
 * 100 attempts; returns false on timeout.
 */
static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
{
	int timeout = 100;

	while (pmu_set != REG_READ(ah, pmu_reg)) {
		if (timeout-- == 0)
			return false;
		REG_WRITE(ah, pmu_reg, pmu_set);
		udelay(10);
	}

	return true;
}

/*
 * Enable or disable the chip's internal voltage regulator according to
 * the eeprom feature flag, using the chip-family specific sequence.
 */
static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
{
	int internal_regulator =
		ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
	u32 reg_val;

	if (internal_regulator) {
		if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
			int reg_pmu_set;

			/* Drop the PGM bit before reprogramming the PMU. */
			reg_pmu_set = REG_READ(ah, AR_PHY_PMU2) & ~AR_PHY_PMU2_PGM;
			REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
			if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
				return;

			/* Chip/clock specific PMU1 configuration words. */
			if (AR_SREV_9330(ah)) {
				if (ah->is_clk_25mhz) {
					reg_pmu_set = (3 << 1) | (8 << 4) |
						      (3 << 8) | (1 << 14) |
						      (6 << 17) | (1 << 20) |
						      (3 << 24);
				} else {
					reg_pmu_set = (4 << 1) | (7 << 4) |
						      (3 << 8) | (1 << 14) |
						      (6 << 17) | (1 << 20) |
						      (3 << 24);
				}
			} else {
				reg_pmu_set = (5 << 1) | (7 << 4) |
					      (2 << 8) | (2 << 14) |
					      (6 << 17) | (1 << 20) |
					      (3 << 24) | (1 << 28);
			}

			REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
			if (!is_pmu_set(ah, AR_PHY_PMU1, reg_pmu_set))
				return;

			reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0xFFC00000)
					| (4 << 26);
			REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
			if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
				return;

			reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0x00200000)
					| (1 << 21);
			REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
			if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
				return;
		} else if (AR_SREV_9462(ah)) {
			reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
			REG_WRITE(ah, AR_PHY_PMU1, reg_val);
		} else {
			/* Internal regulator is ON. Write swreg register. */
			reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
			REG_WRITE(ah, AR_RTC_REG_CONTROL1,
				  REG_READ(ah, AR_RTC_REG_CONTROL1) &
				  (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
			REG_WRITE(ah, AR_RTC_REG_CONTROL0, reg_val);
			/* Set REG_CONTROL1.SWREG_PROGRAM */
			REG_WRITE(ah, AR_RTC_REG_CONTROL1,
				  REG_READ(ah, AR_RTC_REG_CONTROL1) |
				  AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
		}
	} else {
		if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
			/* Power down the internal regulator. */
			REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0);
			while (REG_READ_FIELD(ah, AR_PHY_PMU2,
					      AR_PHY_PMU2_PGM))
				udelay(10);

			REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
			while (!REG_READ_FIELD(ah, AR_PHY_PMU1,
					       AR_PHY_PMU1_PWD))
				udelay(10);
			REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0x1);
			while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
					      AR_PHY_PMU2_PGM))
				udelay(10);
		} else if (AR_SREV_9462(ah))
			REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
		else {
			reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
				AR_RTC_FORCE_SWREG_PRD;
			REG_WRITE(ah, AR_RTC_SLEEP_CLK, reg_val);
		}
	}
}

/*
 * Program the XTAL capacitor DAC from the eeprom tuning caps when the
 * corresponding featureEnable bit (0x40) is set.
 */
static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0];

	if (eep->baseEepHeader.featureEnable & 0x40) {
		tuning_caps_param &= 0x7f;
		REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC,
			      tuning_caps_param);
		REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPOUTDAC,
			      tuning_caps_param);
	}
}

/*
 * Program the AGC quick-drop value for the given frequency
 * (signature continues on the next chunk line).
 */
static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16
						       /* continued */ freq)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP);
	s32 t[3], f[3] = {5180, 5500, 5785};

	if (!quick_drop)
		return;

	/* 2GHz uses the modal value; 5GHz interpolates low/mid/high. */
	if (freq < 4000)
		quick_drop = eep->modalHeader2G.quick_drop;
	else {
		t[0] = eep->base_ext1.quick_drop_low;
		t[1] = eep->modalHeader5G.quick_drop;
		t[2] = eep->base_ext1.quick_drop_high;
		quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
	}
	REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
}

/* Program the per-band TX-end to xPA-off timing into both fields. */
static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
{
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	u32 value;

	value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff :
				eep->modalHeader5G.txEndToXpaOff;

	REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
		      AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
	REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
		      AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
}

/*
 * Push all eeprom-derived board settings to the hardware for the given
 * channel; the chip-family checks gate settings not present everywhere.
 */
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
					     struct ath9k_channel *chan)
{
	ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
	ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
	ar9003_hw_drive_strength_apply(ah);
	ar9003_hw_atten_apply(ah, chan);
	ar9003_hw_quick_drop_apply(ah, chan->channel);
	if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
		ar9003_hw_internal_regulator_apply(ah);
	if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
		ar9003_hw_apply_tuning_caps(ah);
	ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel);
}

/* AR9300 has no ADDAC programming; intentionally empty. */
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
				      struct ath9k_channel *chan)
{
}

/*
 * Returns the interpolated y value corresponding to the specified x value
 * from the np ordered pairs of data (px,py).
 * The pairs do not have to be in any order.
 * If the specified x value is less than any of the px,
 * the returned y value is equal to the py for the lowest px.
 * If the specified x value is greater than any of the px,
 * the returned y value is equal to the py for the highest px.
 */
static int ar9003_hw_power_interpolate(int32_t x,
				       int32_t *px, int32_t *py, u_int16_t np)
{
	int ip = 0;
	int lx = 0, ly = 0, lhave = 0;
	int hx = 0, hy = 0, hhave = 0;
	int dx = 0;
	int y = 0;

	lhave = 0;
	hhave = 0;

	/* identify best lower and higher x calibration measurement */
	for (ip = 0; ip < np; ip++) {
		dx = x - px[ip];

		/* this measurement is higher than our desired x */
		if (dx <= 0) {
			if (!hhave || dx > (x - hx)) {
				/* new best higher x measurement */
				hx = px[ip];
				hy = py[ip];
				hhave = 1;
			}
		}
		/* this measurement is lower than our desired x */
		if (dx >= 0) {
			if (!lhave || dx < (x - lx)) {
				/* new best lower x measurement */
				lx = px[ip];
				ly = py[ip];
				lhave = 1;
			}
		}
	}

	/* the low x is good */
	if (lhave) {
		/* so is the high x */
		if (hhave) {
			/* they're the same, so just pick one */
			if (hx == lx)
				y = ly;
			else	/* interpolate  */
				y = interpolate(x, lx, hx, ly, hy);
		} else		/* only low is good, use it */
			y = ly;
	} else if (hhave)	/* only high is good, use it */
		y = hy;
	else /* nothing is good,this should never happen unless np=0, ???? */
		y = -(1 << 30);

	return y;
}

/*
 * Interpolated legacy (OFDM) target power for rateIndex at freq, from
 * the band's target-power piers stored in the eeprom.
 */
static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
				       u16 rateIndex, u16 freq, bool is2GHz)
{
	u16 numPiers, i;
	s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
	s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct cal_tgt_pow_legacy *pEepromTargetPwr;
	u8 *pFreqBin;

	if (is2GHz) {
		numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower2G;
		pFreqBin = eep->calTarget_freqbin_2G;
	} else {
		numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower5G;
		pFreqBin = eep->calTarget_freqbin_5G;
	}

	/*
	 * create array of channels and targetpower from
	 * targetpower piers stored on eeprom
	 */
	for (i = 0; i < numPiers; i++) {
		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
	}

	/* interpolate to get target power for given frequency */
	return (u8) ar9003_hw_power_interpolate((s32) freq,
						 freqArray,
						 targetPowerArray, numPiers);
}

/*
 * Interpolated HT20 target power for rateIndex at freq
 * (body continues on the next chunk line).
 */
static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
					    u16 rateIndex, u16 freq,
					    bool is2GHz)
{
	u16 numPiers, i;
	s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
	s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct cal_tgt_pow_ht *pEepromTargetPwr;
	u8 *pFreqBin;

	if (is2GHz) {
		numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower2GHT20;
		pFreqBin = eep->calTarget_freqbin_2GHT20;
	} else {
		numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower5GHT20;
		pFreqBin = eep->calTarget_freqbin_5GHT20;
	}

	/*
	 * create array of channels and targetpower
	 * from targetpower piers stored on eeprom
	 */
	for (i = 0; i < numPiers; i++) {
		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
	}

	/* interpolate to get target power for given frequency */
	return (u8) ar9003_hw_power_interpolate((s32) freq,
						 freqArray,
						 /* continued */
						 targetPowerArray, numPiers);
}

/*
 * Interpolated HT40 target power for rateIndex at freq, from the
 * band's HT40 target-power piers stored in the eeprom.
 */
static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
					    u16 rateIndex, u16 freq,
					    bool is2GHz)
{
	u16 numPiers, i;
	s32 targetPowerArray[AR9300_NUM_5G_40_TARGET_POWERS];
	s32 freqArray[AR9300_NUM_5G_40_TARGET_POWERS];
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct cal_tgt_pow_ht *pEepromTargetPwr;
	u8 *pFreqBin;

	if (is2GHz) {
		numPiers = AR9300_NUM_2G_40_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower2GHT40;
		pFreqBin = eep->calTarget_freqbin_2GHT40;
	} else {
		numPiers = AR9300_NUM_5G_40_TARGET_POWERS;
		pEepromTargetPwr = eep->calTargetPower5GHT40;
		pFreqBin = eep->calTarget_freqbin_5GHT40;
	}

	/*
	 * create array of channels and targetpower from
	 * targetpower piers stored on eeprom
	 */
	for (i = 0; i < numPiers; i++) {
		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
	}

	/* interpolate to get target power for given frequency */
	return (u8) ar9003_hw_power_interpolate((s32) freq,
						 freqArray,
						 targetPowerArray, numPiers);
}

/* Interpolated CCK target power (2GHz only) for rateIndex at freq. */
static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
					   u16 rateIndex, u16 freq)
{
	u16 numPiers = AR9300_NUM_2G_CCK_TARGET_POWERS, i;
	s32 targetPowerArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
	s32 freqArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct cal_tgt_pow_legacy *pEepromTargetPwr = eep->calTargetPowerCck;
	u8 *pFreqBin = eep->calTarget_freqbin_Cck;

	/*
	 * create array of channels and targetpower from
	 * targetpower piers stored on eeprom
	 */
	for (i = 0; i < numPiers; i++) {
		freqArray[i] = FBIN2FREQ(pFreqBin[i], 1);
		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
	}

	/* interpolate to get target power for given frequency */
	return (u8) ar9003_hw_power_interpolate((s32) freq,
						 freqArray,
						 targetPowerArray, numPiers);
}

/* Set tx power registers to array of values passed in */
static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
	/* Pack a 6-bit power value into bit position _s of a register. */
#define POW_SM(_r, _s)	(((_r) & 0x3f) << (_s))
	/* make sure forced gain is not set */
	REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);

	/* Write the OFDM power per rate set */

	/* 6 (LSB), 9, 12, 18 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));

	/* 24 (LSB), 36, 48, 54 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));

	/* Write the CCK power per rate set */

	/* 1L (LSB), reserved, 2L, 2S (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
		  /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));

	/* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
	    );

	/* Write the power for duplicated frames - HT40 */

	/* dup40_cck (LSB), dup40_ofdm, ext20_cck, ext20_ofdm (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(8),
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
	    );

	/* Write the HT20 power per rate set */

	/* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
		  POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_0_8_16], 0)
	    );

	/* 6 (LSB), 7, 12, 13 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
		  POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_6], 0)
	    );

	/* 14 (LSB), 15, 20, 21 */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
		  POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_14], 0)
	    );

	/* Mixed HT20 and HT40 rates */

	/* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
		  POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT20_22], 0)
	    );

	/*
	 * Write the HT40 power per rate set
	 * correct PAR difference between HT40 and HT20/LEGACY
	 * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
	 */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
		  POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_0_8_16], 0)
	    );

	/* 6 (LSB), 7, 12, 13 (MSB) */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
		  POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_6], 0)
	    );

	/* 14 (LSB), 15, 20, 21 */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
		  POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
		  POW_SM(pPwrArray[ALL_TARGET_HT40_14], 0)
	    );

	return 0;
#undef POW_SM
}

/*
 * Fill targetPowerValT2[] with interpolated target powers for every
 * rate group at freq (continues on the next chunk lines).
 */
static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
					      u8 *targetPowerValT2)
{
	/* XXX: hard code for now, need to get from eeprom struct */
	u8 ht40PowerIncForPdadc = 0;
	bool is2GHz = false;
	unsigned int i = 0;
	struct ath_common *common = ath9k_hw_common(ah);

	if (freq < 4000)
		is2GHz = true;

	targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
	    ar9003_hw_eeprom_get_tgt_pwr(ah,
	    /* Continuation of the target-power table fill. */
					 LEGACY_TARGET_RATE_6_24, freq,
					 is2GHz);
	targetPowerValT2[ALL_TARGET_LEGACY_36] =
	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_36, freq,
					 is2GHz);
	targetPowerValT2[ALL_TARGET_LEGACY_48] =
	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_48, freq,
					 is2GHz);
	targetPowerValT2[ALL_TARGET_LEGACY_54] =
	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
					 is2GHz);
	targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
					     freq);
	targetPowerValT2[ALL_TARGET_LEGACY_5S] =
	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_5S, freq);
	targetPowerValT2[ALL_TARGET_LEGACY_11L] =
	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
	targetPowerValT2[ALL_TARGET_LEGACY_11S] =
	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
	targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_1_3_9_11_17_19] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah,
					      HT_TARGET_RATE_1_3_9_11_17_19,
					      freq, is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_4] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_5] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_6] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_7] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_12] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_13] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_14] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_15] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_20] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_21] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_22] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
					      is2GHz);
	targetPowerValT2[ALL_TARGET_HT20_23] =
	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
					      is2GHz);
	/* HT40 rates take the extra PDADC headroom adjustment. */
	targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_1_3_9_11_17_19] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah,
					      HT_TARGET_RATE_1_3_9_11_17_19,
					      freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_4] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_5] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_6] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_7] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_12] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_13] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_14] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_15] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_20] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_21] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_22] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
					      is2GHz) + ht40PowerIncForPdadc;
	targetPowerValT2[ALL_TARGET_HT40_23] =
	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
					      is2GHz) + ht40PowerIncForPdadc;

	for (i = 0; i < ar9300RateSize; i++) {
		ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
			i, targetPowerValT2[i]);
	}
}

/*
 * Look up one calibration pier for a chain/band, returning its
 * frequency, power correction, temperature and voltage measurements.
 * Returns -1 on an out-of-range chain or pier index.
 */
static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
				  int mode,
				  int ipier,
				  int ichain,
				  int *pfrequency,
				  int *pcorrection,
				  int *ptemperature, int *pvoltage)
{
	u8 *pCalPier;
	struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
	int is2GHz;
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	struct ath_common *common = ath9k_hw_common(ah);

	if (ichain >= AR9300_MAX_CHAINS) {
		ath_dbg(common, EEPROM,
			"Invalid chain index, must be less than %d\n",
			AR9300_MAX_CHAINS);
		return -1;
	}

	if (mode) {		/* 5GHz */
		if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
			ath_dbg(common, EEPROM,
				"Invalid 5GHz cal pier index, must be less than %d\n",
				AR9300_NUM_5G_CAL_PIERS);
			return -1;
		}
		pCalPier = &(eep->calFreqPier5G[ipier]);
		pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
		is2GHz = 0;
	} else {
		if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
			ath_dbg(common, EEPROM,
				"Invalid 2GHz cal pier index, must be less than %d\n",
				AR9300_NUM_2G_CAL_PIERS);
			return -1;
		}

		pCalPier = &(eep->calFreqPier2G[ipier]);
		pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
		is2GHz = 1;
	}

	*pfrequency = FBIN2FREQ(*pCalPier, is2GHz);
	*pcorrection = pCalPierStruct->refPower;
	*ptemperature = pCalPierStruct->tempMeas;
	*pvoltage = pCalPierStruct->voltMeas;

	return 0;
}

/*
 * Program open-loop power control: per-chain gain deltas, error-estimate
 * mode, and temperature compensation (continues on the next chunk line).
 */
static int ar9003_hw_power_control_override(struct ath_hw *ah,
					    int frequency,
					    int *correction,
					    int *voltage, int *temperature)
{
	int tempSlope = 0;
	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
	int f[3], t[3];

	REG_RMW(ah, AR_PHY_TPC_11_B0,
		(correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
		AR_PHY_TPC_OLPC_GAIN_DELTA);
	if (ah->caps.tx_chainmask &
BIT(1)) REG_RMW(ah, AR_PHY_TPC_11_B1, (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), AR_PHY_TPC_OLPC_GAIN_DELTA); if (ah->caps.tx_chainmask & BIT(2)) REG_RMW(ah, AR_PHY_TPC_11_B2, (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), AR_PHY_TPC_OLPC_GAIN_DELTA); /* enable open loop power control on chip */ REG_RMW(ah, AR_PHY_TPC_6_B0, (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S), AR_PHY_TPC_6_ERROR_EST_MODE); if (ah->caps.tx_chainmask & BIT(1)) REG_RMW(ah, AR_PHY_TPC_6_B1, (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S), AR_PHY_TPC_6_ERROR_EST_MODE); if (ah->caps.tx_chainmask & BIT(2)) REG_RMW(ah, AR_PHY_TPC_6_B2, (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S), AR_PHY_TPC_6_ERROR_EST_MODE); /* * enable temperature compensation * Need to use register names */ if (frequency < 4000) tempSlope = eep->modalHeader2G.tempSlope; else if (eep->base_ext2.tempSlopeLow != 0) { t[0] = eep->base_ext2.tempSlopeLow; f[0] = 5180; t[1] = eep->modalHeader5G.tempSlope; f[1] = 5500; t[2] = eep->base_ext2.tempSlopeHigh; f[2] = 5785; tempSlope = ar9003_hw_power_interpolate((s32) frequency, f, t, 3); } else tempSlope = eep->modalHeader5G.tempSlope; REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope); if (AR_SREV_9462_20(ah)) REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1, AR_PHY_TPC_19_B1_ALPHA_THERM, tempSlope); REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE, temperature[0]); return 0; } /* Apply the recorded correction values. 
*/ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) { int ichain, ipier, npier; int mode; int lfrequency[AR9300_MAX_CHAINS], lcorrection[AR9300_MAX_CHAINS], ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS]; int hfrequency[AR9300_MAX_CHAINS], hcorrection[AR9300_MAX_CHAINS], htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS]; int fdiff; int correction[AR9300_MAX_CHAINS], voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS]; int pfrequency, pcorrection, ptemperature, pvoltage; struct ath_common *common = ath9k_hw_common(ah); mode = (frequency >= 4000); if (mode) npier = AR9300_NUM_5G_CAL_PIERS; else npier = AR9300_NUM_2G_CAL_PIERS; for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { lfrequency[ichain] = 0; hfrequency[ichain] = 100000; } /* identify best lower and higher frequency calibration measurement */ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { for (ipier = 0; ipier < npier; ipier++) { if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain, &pfrequency, &pcorrection, &ptemperature, &pvoltage)) { fdiff = frequency - pfrequency; /* * this measurement is higher than * our desired frequency */ if (fdiff <= 0) { if (hfrequency[ichain] <= 0 || hfrequency[ichain] >= 100000 || fdiff > (frequency - hfrequency[ichain])) { /* * new best higher * frequency measurement */ hfrequency[ichain] = pfrequency; hcorrection[ichain] = pcorrection; htemperature[ichain] = ptemperature; hvoltage[ichain] = pvoltage; } } if (fdiff >= 0) { if (lfrequency[ichain] <= 0 || fdiff < (frequency - lfrequency[ichain])) { /* * new best lower * frequency measurement */ lfrequency[ichain] = pfrequency; lcorrection[ichain] = pcorrection; ltemperature[ichain] = ptemperature; lvoltage[ichain] = pvoltage; } } } } } /* interpolate */ for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n", ichain, frequency, lfrequency[ichain], lcorrection[ichain], hfrequency[ichain], 
hcorrection[ichain]); /* they're the same, so just pick one */ if (hfrequency[ichain] == lfrequency[ichain]) { correction[ichain] = lcorrection[ichain]; voltage[ichain] = lvoltage[ichain]; temperature[ichain] = ltemperature[ichain]; } /* the low frequency is good */ else if (frequency - lfrequency[ichain] < 1000) { /* so is the high frequency, interpolate */ if (hfrequency[ichain] - frequency < 1000) { correction[ichain] = interpolate(frequency, lfrequency[ichain], hfrequency[ichain], lcorrection[ichain], hcorrection[ichain]); temperature[ichain] = interpolate(frequency, lfrequency[ichain], hfrequency[ichain], ltemperature[ichain], htemperature[ichain]); voltage[ichain] = interpolate(frequency, lfrequency[ichain], hfrequency[ichain], lvoltage[ichain], hvoltage[ichain]); } /* only low is good, use it */ else { correction[ichain] = lcorrection[ichain]; temperature[ichain] = ltemperature[ichain]; voltage[ichain] = lvoltage[ichain]; } } /* only high is good, use it */ else if (hfrequency[ichain] - frequency < 1000) { correction[ichain] = hcorrection[ichain]; temperature[ichain] = htemperature[ichain]; voltage[ichain] = hvoltage[ichain]; } else { /* nothing is good, presume 0???? 
*/ correction[ichain] = 0; temperature[ichain] = 0; voltage[ichain] = 0; } } ar9003_hw_power_control_override(ah, frequency, correction, voltage, temperature); ath_dbg(common, EEPROM, "for frequency=%d, calibration correction = %d %d %d\n", frequency, correction[0], correction[1], correction[2]); return 0; } static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep, int idx, int edge, bool is2GHz) { struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G; struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; if (is2GHz) return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]); else return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]); } static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep, int idx, unsigned int edge, u16 freq, bool is2GHz) { struct cal_ctl_data_2g *ctl_2g = eep->ctlPowerData_2G; struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; u8 *ctl_freqbin = is2GHz ? &eep->ctl_freqbin_2G[idx][0] : &eep->ctl_freqbin_5G[idx][0]; if (is2GHz) { if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq && CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1])) return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]); } else { if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq && CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1])) return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]); } return MAX_RATE_POWER; } /* * Find the maximum conformance test limit for the given channel and CTL info */ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep, u16 freq, int idx, bool is2GHz) { u16 twiceMaxEdgePower = MAX_RATE_POWER; u8 *ctl_freqbin = is2GHz ? &eep->ctl_freqbin_2G[idx][0] : &eep->ctl_freqbin_5G[idx][0]; u16 num_edges = is2GHz ? 
AR9300_NUM_BAND_EDGES_2G : AR9300_NUM_BAND_EDGES_5G; unsigned int edge; /* Get the edge power */ for (edge = 0; (edge < num_edges) && (ctl_freqbin[edge] != AR5416_BCHAN_UNUSED); edge++) { /* * If there's an exact channel match or an inband flag set * on the lower channel use the given rdEdgePower */ if (freq == ath9k_hw_fbin2freq(ctl_freqbin[edge], is2GHz)) { twiceMaxEdgePower = ar9003_hw_get_direct_edge_power(eep, idx, edge, is2GHz); break; } else if ((edge > 0) && (freq < ath9k_hw_fbin2freq(ctl_freqbin[edge], is2GHz))) { twiceMaxEdgePower = ar9003_hw_get_indirect_edge_power(eep, idx, edge, freq, is2GHz); /* * Leave loop - no more affecting edges possible in * this monotonic increasing list */ break; } } return twiceMaxEdgePower; } static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah, struct ath9k_channel *chan, u8 *pPwrArray, u16 cfgCtl, u8 antenna_reduction, u16 powerLimit) { struct ath_common *common = ath9k_hw_common(ah); struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep; u16 twiceMaxEdgePower; int i; u16 scaledPower = 0, minCtlPower; static const u16 ctlModesFor11a[] = { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 }; static const u16 ctlModesFor11g[] = { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40 }; u16 numCtlModes; const u16 *pCtlMode; u16 ctlMode, freq; struct chan_centers centers; u8 *ctlIndex; u8 ctlNum; u16 twiceMinEdgePower; bool is2ghz = IS_CHAN_2GHZ(chan); ath9k_hw_get_channel_centers(ah, chan, &centers); scaledPower = powerLimit - antenna_reduction; /* * Reduce scaled Power by number of chains active to get * to per chain tx power level */ switch (ar5416_get_ntxchains(ah->txchainmask)) { case 1: break; case 2: if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN) scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; else scaledPower = 0; break; case 3: if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN) scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; else scaledPower = 0; break; } scaledPower = max((u16)0, 
scaledPower); /* * Get target powers from EEPROM - our baseline for TX Power */ if (is2ghz) { /* Setup for CTL modes */ /* CTL_11B, CTL_11G, CTL_2GHT20 */ numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; pCtlMode = ctlModesFor11g; if (IS_CHAN_HT40(chan)) /* All 2G CTL's */ numCtlModes = ARRAY_SIZE(ctlModesFor11g); } else { /* Setup for CTL modes */ /* CTL_11A, CTL_5GHT20 */ numCtlModes = ARRAY_SIZE(ctlModesFor11a) - SUB_NUM_CTL_MODES_AT_5G_40; pCtlMode = ctlModesFor11a; if (IS_CHAN_HT40(chan)) /* All 5G CTL's */ numCtlModes = ARRAY_SIZE(ctlModesFor11a); } /* * For MIMO, need to apply regulatory caps individually across * dynamically running modes: CCK, OFDM, HT20, HT40 * * The outer loop walks through each possible applicable runtime mode. * The inner loop walks through each ctlIndex entry in EEPROM. * The ctl value is encoded as [7:4] == test group, [3:0] == test mode. */ for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || (pCtlMode[ctlMode] == CTL_2GHT40); if (isHt40CtlMode) freq = centers.synth_center; else if (pCtlMode[ctlMode] & EXT_ADDITIVE) freq = centers.ext_center; else freq = centers.ctl_center; ath_dbg(common, REGULATORY, "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n", ctlMode, numCtlModes, isHt40CtlMode, (pCtlMode[ctlMode] & EXT_ADDITIVE)); /* walk through each CTL index stored in EEPROM */ if (is2ghz) { ctlIndex = pEepData->ctlIndex_2G; ctlNum = AR9300_NUM_CTLS_2G; } else { ctlIndex = pEepData->ctlIndex_5G; ctlNum = AR9300_NUM_CTLS_5G; } twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) { ath_dbg(common, REGULATORY, "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n", i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i], chan->channel); /* * compare test group from regulatory * channel list with test mode from pCtlMode * list */ if ((((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == ctlIndex[i]) 
|| (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == ((ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) { twiceMinEdgePower = ar9003_hw_get_max_edge_power(pEepData, freq, i, is2ghz); if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) /* * Find the minimum of all CTL * edge powers that apply to * this channel */ twiceMaxEdgePower = min(twiceMaxEdgePower, twiceMinEdgePower); else { /* specific */ twiceMaxEdgePower = twiceMinEdgePower; break; } } } minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); ath_dbg(common, REGULATORY, "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n", ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, scaledPower, minCtlPower); /* Apply ctl mode to correct target power set */ switch (pCtlMode[ctlMode]) { case CTL_11B: for (i = ALL_TARGET_LEGACY_1L_5L; i <= ALL_TARGET_LEGACY_11S; i++) pPwrArray[i] = (u8)min((u16)pPwrArray[i], minCtlPower); break; case CTL_11A: case CTL_11G: for (i = ALL_TARGET_LEGACY_6_24; i <= ALL_TARGET_LEGACY_54; i++) pPwrArray[i] = (u8)min((u16)pPwrArray[i], minCtlPower); break; case CTL_5GHT20: case CTL_2GHT20: for (i = ALL_TARGET_HT20_0_8_16; i <= ALL_TARGET_HT20_21; i++) pPwrArray[i] = (u8)min((u16)pPwrArray[i], minCtlPower); pPwrArray[ALL_TARGET_HT20_22] = (u8)min((u16)pPwrArray[ALL_TARGET_HT20_22], minCtlPower); pPwrArray[ALL_TARGET_HT20_23] = (u8)min((u16)pPwrArray[ALL_TARGET_HT20_23], minCtlPower); break; case CTL_5GHT40: case CTL_2GHT40: for (i = ALL_TARGET_HT40_0_8_16; i <= ALL_TARGET_HT40_23; i++) pPwrArray[i] = (u8)min((u16)pPwrArray[i], minCtlPower); break; default: break; } } /* end ctl mode checking */ } static inline u8 mcsidx_to_tgtpwridx(unsigned int mcs_idx, u8 base_pwridx) { u8 mod_idx = mcs_idx % 8; if (mod_idx <= 3) return mod_idx ? 
(base_pwridx + 1) : base_pwridx; else return base_pwridx + 4 * (mcs_idx / 8) + mod_idx - 2; } static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, struct ath9k_channel *chan, u16 cfgCtl, u8 twiceAntennaReduction, u8 powerLimit, bool test) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ath_common *common = ath9k_hw_common(ah); struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; struct ar9300_modal_eep_header *modal_hdr; u8 targetPowerValT2[ar9300RateSize]; u8 target_power_val_t2_eep[ar9300RateSize]; unsigned int i = 0, paprd_scale_factor = 0; u8 pwr_idx, min_pwridx = 0; ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2); if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) { if (IS_CHAN_2GHZ(chan)) modal_hdr = &eep->modalHeader2G; else modal_hdr = &eep->modalHeader5G; ah->paprd_ratemask = le32_to_cpu(modal_hdr->papdRateMaskHt20) & AR9300_PAPRD_RATE_MASK; ah->paprd_ratemask_ht40 = le32_to_cpu(modal_hdr->papdRateMaskHt40) & AR9300_PAPRD_RATE_MASK; paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan); min_pwridx = IS_CHAN_HT40(chan) ? 
ALL_TARGET_HT40_0_8_16 : ALL_TARGET_HT20_0_8_16; if (!ah->paprd_table_write_done) { memcpy(target_power_val_t2_eep, targetPowerValT2, sizeof(targetPowerValT2)); for (i = 0; i < 24; i++) { pwr_idx = mcsidx_to_tgtpwridx(i, min_pwridx); if (ah->paprd_ratemask & (1 << i)) { if (targetPowerValT2[pwr_idx] && targetPowerValT2[pwr_idx] == target_power_val_t2_eep[pwr_idx]) targetPowerValT2[pwr_idx] -= paprd_scale_factor; } } } memcpy(target_power_val_t2_eep, targetPowerValT2, sizeof(targetPowerValT2)); } ar9003_hw_set_power_per_rate_table(ah, chan, targetPowerValT2, cfgCtl, twiceAntennaReduction, powerLimit); if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) { for (i = 0; i < ar9300RateSize; i++) { if ((ah->paprd_ratemask & (1 << i)) && (abs(targetPowerValT2[i] - target_power_val_t2_eep[i]) > paprd_scale_factor)) { ah->paprd_ratemask &= ~(1 << i); ath_dbg(common, EEPROM, "paprd disabled for mcs %d\n", i); } } } regulatory->max_power_level = 0; for (i = 0; i < ar9300RateSize; i++) { if (targetPowerValT2[i] > regulatory->max_power_level) regulatory->max_power_level = targetPowerValT2[i]; } ath9k_hw_update_regulatory_maxpower(ah); if (test) return; for (i = 0; i < ar9300RateSize; i++) { ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]); } ah->txpower_limit = regulatory->max_power_level; /* Write target power array to registers */ ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); ar9003_hw_calibration_apply(ah, chan->channel); if (IS_CHAN_2GHZ(chan)) { if (IS_CHAN_HT40(chan)) i = ALL_TARGET_HT40_0_8_16; else i = ALL_TARGET_HT20_0_8_16; } else { if (IS_CHAN_HT40(chan)) i = ALL_TARGET_HT40_7; else i = ALL_TARGET_HT20_7; } ah->paprd_target_power = targetPowerValT2[i]; } static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) { return AR_NO_SPUR; } s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; return (eep->baseEepHeader.txrxgain >> 4) & 0xf; /* bits 7:4 */ } s32 
ar9003_hw_get_rx_gain_idx(struct ath_hw *ah) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */ } u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; if (is_2ghz) return eep->modalHeader2G.spurChans; else return eep->modalHeader5G.spurChans; } unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; if (IS_CHAN_2GHZ(chan)) return MS(le32_to_cpu(eep->modalHeader2G.papdRateMaskHt20), AR9300_PAPRD_SCALE_1); else { if (chan->channel >= 5700) return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20), AR9300_PAPRD_SCALE_1); else if (chan->channel >= 5400) return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), AR9300_PAPRD_SCALE_2); else return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), AR9300_PAPRD_SCALE_1); } } const struct eeprom_ops eep_ar9300_ops = { .check_eeprom = ath9k_hw_ar9300_check_eeprom, .get_eeprom = ath9k_hw_ar9300_get_eeprom, .fill_eeprom = ath9k_hw_ar9300_fill_eeprom, .dump_eeprom = ath9k_hw_ar9003_dump_eeprom, .get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver, .get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev, .set_board_values = ath9k_hw_ar9300_set_board_values, .set_addac = ath9k_hw_ar9300_set_addac, .set_txpower = ath9k_hw_ar9300_set_txpower, .get_spur_channel = ath9k_hw_ar9300_get_spur_channel };
gpl-2.0
jongwonk/s5pv210_linux_kernel
arch/tile/mm/highmem.c
4479
8463
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/highmem.h> #include <linux/module.h> #include <linux/pagemap.h> #include <asm/homecache.h> #define kmap_get_pte(vaddr) \ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\ (vaddr)), (vaddr)) void *kmap(struct page *page) { void *kva; unsigned long flags; pte_t *ptep; might_sleep(); if (!PageHighMem(page)) return page_address(page); kva = kmap_high(page); /* * Rewrite the PTE under the lock. This ensures that the page * is not currently migrating. */ ptep = kmap_get_pte((unsigned long)kva); flags = homecache_kpte_lock(); set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page))); homecache_kpte_unlock(flags); return kva; } EXPORT_SYMBOL(kmap); void kunmap(struct page *page) { if (in_interrupt()) BUG(); if (!PageHighMem(page)) return; kunmap_high(page); } EXPORT_SYMBOL(kunmap); /* * Describe a single atomic mapping of a page on a given cpu at a * given address, and allow it to be linked into a list. */ struct atomic_mapped_page { struct list_head list; struct page *page; int cpu; unsigned long va; }; static spinlock_t amp_lock = __SPIN_LOCK_UNLOCKED(&amp_lock); static struct list_head amp_list = LIST_HEAD_INIT(amp_list); /* * Combining this structure with a per-cpu declaration lets us give * each cpu an atomic_mapped_page structure per type. 
*/ struct kmap_amps { struct atomic_mapped_page per_type[KM_TYPE_NR]; }; static DEFINE_PER_CPU(struct kmap_amps, amps); /* * Add a page and va, on this cpu, to the list of kmap_atomic pages, * and write the new pte to memory. Writing the new PTE under the * lock guarantees that it is either on the list before migration starts * (if we won the race), or set_pte() sets the migrating bit in the PTE * (if we lost the race). And doing it under the lock guarantees * that when kmap_atomic_fix_one_pte() comes along, it finds a valid * PTE in memory, iff the mapping is still on the amp_list. * * Finally, doing it under the lock lets us safely examine the page * to see if it is immutable or not, for the generic kmap_atomic() case. * If we examine it earlier we are exposed to a race where it looks * writable earlier, but becomes immutable before we write the PTE. */ static void kmap_atomic_register(struct page *page, enum km_type type, unsigned long va, pte_t *ptep, pte_t pteval) { unsigned long flags; struct atomic_mapped_page *amp; flags = homecache_kpte_lock(); spin_lock(&amp_lock); /* With interrupts disabled, now fill in the per-cpu info. */ amp = &__get_cpu_var(amps).per_type[type]; amp->page = page; amp->cpu = smp_processor_id(); amp->va = va; /* For generic kmap_atomic(), choose the PTE writability now. */ if (!pte_read(pteval)) pteval = mk_pte(page, page_to_kpgprot(page)); list_add(&amp->list, &amp_list); set_pte(ptep, pteval); arch_flush_lazy_mmu_mode(); spin_unlock(&amp_lock); homecache_kpte_unlock(flags); } /* * Remove a page and va, on this cpu, from the list of kmap_atomic pages. * Linear-time search, but we count on the lists being short. * We don't need to adjust the PTE under the lock (as opposed to the * kmap_atomic_register() case), since we're just unconditionally * zeroing the PTE after it's off the list. 
*/ static void kmap_atomic_unregister(struct page *page, unsigned long va) { unsigned long flags; struct atomic_mapped_page *amp; int cpu = smp_processor_id(); spin_lock_irqsave(&amp_lock, flags); list_for_each_entry(amp, &amp_list, list) { if (amp->page == page && amp->cpu == cpu && amp->va == va) break; } BUG_ON(&amp->list == &amp_list); list_del(&amp->list); spin_unlock_irqrestore(&amp_lock, flags); } /* Helper routine for kmap_atomic_fix_kpte(), below. */ static void kmap_atomic_fix_one_kpte(struct atomic_mapped_page *amp, int finished) { pte_t *ptep = kmap_get_pte(amp->va); if (!finished) { set_pte(ptep, pte_mkmigrate(*ptep)); flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE, cpumask_of(amp->cpu), NULL, 0); } else { /* * Rewrite a default kernel PTE for this page. * We rely on the fact that set_pte() writes the * present+migrating bits last. */ pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page)); set_pte(ptep, pte); } } /* * This routine is a helper function for homecache_fix_kpte(); see * its comments for more information on the "finished" argument here. * * Note that we hold the lock while doing the remote flushes, which * will stall any unrelated cpus trying to do kmap_atomic operations. * We could just update the PTEs under the lock, and save away copies * of the structs (or just the va+cpu), then flush them after we * release the lock, but it seems easier just to do it all under the lock. */ void kmap_atomic_fix_kpte(struct page *page, int finished) { struct atomic_mapped_page *amp; unsigned long flags; spin_lock_irqsave(&amp_lock, flags); list_for_each_entry(amp, &amp_list, list) { if (amp->page == page) kmap_atomic_fix_one_kpte(amp, finished); } spin_unlock_irqrestore(&amp_lock, flags); } /* * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap * because the kmap code must perform a global TLB invalidation when * the kmap pool wraps. * * Note that they may be slower than on x86 (etc.) 
because unlike on * those platforms, we do have to take a global lock to map and unmap * pages on Tile (see above). * * When holding an atomic kmap is is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. */ void *kmap_atomic_prot(struct page *page, pgprot_t prot) { unsigned long vaddr; int idx, type; pte_t *pte; /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ pagefault_disable(); /* Avoid icache flushes by disallowing atomic executable mappings. */ BUG_ON(pte_exec(prot)); if (!PageHighMem(page)) return page_address(page); type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); pte = kmap_get_pte(vaddr); BUG_ON(!pte_none(*pte)); /* Register that this page is mapped atomically on this cpu. */ kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot)); return (void *)vaddr; } EXPORT_SYMBOL(kmap_atomic_prot); void *kmap_atomic(struct page *page) { /* PAGE_NONE is a magic value that tells us to check immutability. */ return kmap_atomic_prot(page, PAGE_NONE); } EXPORT_SYMBOL(kmap_atomic); void __kunmap_atomic(void *kvaddr) { unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; if (vaddr >= __fix_to_virt(FIX_KMAP_END) && vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) { pte_t *pte = kmap_get_pte(vaddr); pte_t pteval = *pte; int idx, type; type = kmap_atomic_idx(); idx = type + KM_TYPE_NR*smp_processor_id(); /* * Force other mappings to Oops if they try to access this pte * without first remapping it. Keeping stale mappings around * is a bad idea. 
*/ BUG_ON(!pte_present(pteval) && !pte_migrating(pteval)); kmap_atomic_unregister(pte_page(pteval), vaddr); kpte_clear_flush(pte, vaddr); kmap_atomic_idx_pop(); } else { /* Must be a lowmem page */ BUG_ON(vaddr < PAGE_OFFSET); BUG_ON(vaddr >= (unsigned long)high_memory); } arch_flush_lazy_mmu_mode(); pagefault_enable(); } EXPORT_SYMBOL(__kunmap_atomic); /* * This API is supposed to allow us to map memory without a "struct page". * Currently we don't support this, though this may change in the future. */ void *kmap_atomic_pfn(unsigned long pfn) { return kmap_atomic(pfn_to_page(pfn)); } void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) { return kmap_atomic_prot(pfn_to_page(pfn), prot); } struct page *kmap_atomic_to_page(void *ptr) { pte_t *pte; unsigned long vaddr = (unsigned long)ptr; if (vaddr < FIXADDR_START) return virt_to_page(ptr); pte = kmap_get_pte(vaddr); return pte_page(*pte); }
gpl-2.0
HighwindONE/android_kernel_motorola_msm8226
arch/arm/mach-s3c64xx/common.c
4735
9320
/* * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * Common Codes for S3C64XX machines * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/irq.h> #include <linux/gpio.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/hardware/vic.h> #include <asm/system_misc.h> #include <mach/map.h> #include <mach/hardware.h> #include <mach/regs-gpio.h> #include <plat/cpu.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/pm.h> #include <plat/gpio-cfg.h> #include <plat/irq-uart.h> #include <plat/irq-vic-timer.h> #include <plat/regs-irqtype.h> #include <plat/regs-serial.h> #include <plat/watchdog-reset.h> #include "common.h" /* uart registration process */ static void __init s3c64xx_init_uarts(struct s3c2410_uartcfg *cfg, int no) { s3c24xx_init_uartdevs("s3c6400-uart", s3c64xx_uart_resources, cfg, no); } /* table of supported CPUs */ static const char name_s3c6400[] = "S3C6400"; static const char name_s3c6410[] = "S3C6410"; static struct cpu_table cpu_ids[] __initdata = { { .idcode = S3C6400_CPU_ID, .idmask = S3C64XX_CPU_MASK, .map_io = s3c6400_map_io, .init_clocks = s3c6400_init_clocks, .init_uarts = s3c64xx_init_uarts, .init = s3c6400_init, .name = name_s3c6400, }, { .idcode = S3C6410_CPU_ID, .idmask = S3C64XX_CPU_MASK, .map_io = s3c6410_map_io, .init_clocks = s3c6410_init_clocks, .init_uarts = s3c64xx_init_uarts, .init = s3c6410_init, .name = name_s3c6410, }, }; /* minimal IO mapping */ /* 
see notes on uart map in arch/arm/mach-s3c64xx/include/mach/debug-macro.S */ #define UART_OFFS (S3C_PA_UART & 0xfffff) static struct map_desc s3c_iodesc[] __initdata = { { .virtual = (unsigned long)S3C_VA_SYS, .pfn = __phys_to_pfn(S3C64XX_PA_SYSCON), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = (unsigned long)S3C_VA_MEM, .pfn = __phys_to_pfn(S3C64XX_PA_SROM), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = (unsigned long)(S3C_VA_UART + UART_OFFS), .pfn = __phys_to_pfn(S3C_PA_UART), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = (unsigned long)VA_VIC0, .pfn = __phys_to_pfn(S3C64XX_PA_VIC0), .length = SZ_16K, .type = MT_DEVICE, }, { .virtual = (unsigned long)VA_VIC1, .pfn = __phys_to_pfn(S3C64XX_PA_VIC1), .length = SZ_16K, .type = MT_DEVICE, }, { .virtual = (unsigned long)S3C_VA_TIMER, .pfn = __phys_to_pfn(S3C_PA_TIMER), .length = SZ_16K, .type = MT_DEVICE, }, { .virtual = (unsigned long)S3C64XX_VA_GPIO, .pfn = __phys_to_pfn(S3C64XX_PA_GPIO), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = (unsigned long)S3C64XX_VA_MODEM, .pfn = __phys_to_pfn(S3C64XX_PA_MODEM), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = (unsigned long)S3C_VA_WATCHDOG, .pfn = __phys_to_pfn(S3C64XX_PA_WATCHDOG), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = (unsigned long)S3C_VA_USB_HSPHY, .pfn = __phys_to_pfn(S3C64XX_PA_USB_HSPHY), .length = SZ_1K, .type = MT_DEVICE, }, }; static struct bus_type s3c64xx_subsys = { .name = "s3c64xx-core", .dev_name = "s3c64xx-core", }; static struct device s3c64xx_dev = { .bus = &s3c64xx_subsys, }; /* read cpu identification code */ void __init s3c64xx_init_io(struct map_desc *mach_desc, int size) { /* initialise the io descriptors we need for initialisation */ iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc)); iotable_init(mach_desc, size); init_consistent_dma_size(SZ_8M); /* detect cpu id */ s3c64xx_init_cpu(); s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids)); } static __init int s3c64xx_dev_init(void) { 
subsys_system_register(&s3c64xx_subsys, NULL); return device_register(&s3c64xx_dev); } core_initcall(s3c64xx_dev_init); /* * setup the sources the vic should advertise resume * for, even though it is not doing the wake * (set_irq_wake needs to be valid) */ #define IRQ_VIC0_RESUME (1 << (IRQ_RTC_TIC - IRQ_VIC0_BASE)) #define IRQ_VIC1_RESUME (1 << (IRQ_RTC_ALARM - IRQ_VIC1_BASE) | \ 1 << (IRQ_PENDN - IRQ_VIC1_BASE) | \ 1 << (IRQ_HSMMC0 - IRQ_VIC1_BASE) | \ 1 << (IRQ_HSMMC1 - IRQ_VIC1_BASE) | \ 1 << (IRQ_HSMMC2 - IRQ_VIC1_BASE)) void __init s3c64xx_init_irq(u32 vic0_valid, u32 vic1_valid) { printk(KERN_DEBUG "%s: initialising interrupts\n", __func__); /* initialise the pair of VICs */ vic_init(VA_VIC0, IRQ_VIC0_BASE, vic0_valid, IRQ_VIC0_RESUME); vic_init(VA_VIC1, IRQ_VIC1_BASE, vic1_valid, IRQ_VIC1_RESUME); /* add the timer sub-irqs */ s3c_init_vic_timer_irq(5, IRQ_TIMER0); } #define eint_offset(irq) ((irq) - IRQ_EINT(0)) #define eint_irq_to_bit(irq) ((u32)(1 << eint_offset(irq))) static inline void s3c_irq_eint_mask(struct irq_data *data) { u32 mask; mask = __raw_readl(S3C64XX_EINT0MASK); mask |= (u32)data->chip_data; __raw_writel(mask, S3C64XX_EINT0MASK); } static void s3c_irq_eint_unmask(struct irq_data *data) { u32 mask; mask = __raw_readl(S3C64XX_EINT0MASK); mask &= ~((u32)data->chip_data); __raw_writel(mask, S3C64XX_EINT0MASK); } static inline void s3c_irq_eint_ack(struct irq_data *data) { __raw_writel((u32)data->chip_data, S3C64XX_EINT0PEND); } static void s3c_irq_eint_maskack(struct irq_data *data) { /* compiler should in-line these */ s3c_irq_eint_mask(data); s3c_irq_eint_ack(data); } static int s3c_irq_eint_set_type(struct irq_data *data, unsigned int type) { int offs = eint_offset(data->irq); int pin, pin_val; int shift; u32 ctrl, mask; u32 newvalue = 0; void __iomem *reg; if (offs > 27) return -EINVAL; if (offs <= 15) reg = S3C64XX_EINT0CON0; else reg = S3C64XX_EINT0CON1; switch (type) { case IRQ_TYPE_NONE: printk(KERN_WARNING "No edge setting!\n"); 
break; case IRQ_TYPE_EDGE_RISING: newvalue = S3C2410_EXTINT_RISEEDGE; break; case IRQ_TYPE_EDGE_FALLING: newvalue = S3C2410_EXTINT_FALLEDGE; break; case IRQ_TYPE_EDGE_BOTH: newvalue = S3C2410_EXTINT_BOTHEDGE; break; case IRQ_TYPE_LEVEL_LOW: newvalue = S3C2410_EXTINT_LOWLEV; break; case IRQ_TYPE_LEVEL_HIGH: newvalue = S3C2410_EXTINT_HILEV; break; default: printk(KERN_ERR "No such irq type %d", type); return -1; } if (offs <= 15) shift = (offs / 2) * 4; else shift = ((offs - 16) / 2) * 4; mask = 0x7 << shift; ctrl = __raw_readl(reg); ctrl &= ~mask; ctrl |= newvalue << shift; __raw_writel(ctrl, reg); /* set the GPIO pin appropriately */ if (offs < 16) { pin = S3C64XX_GPN(offs); pin_val = S3C_GPIO_SFN(2); } else if (offs < 23) { pin = S3C64XX_GPL(offs + 8 - 16); pin_val = S3C_GPIO_SFN(3); } else { pin = S3C64XX_GPM(offs - 23); pin_val = S3C_GPIO_SFN(3); } s3c_gpio_cfgpin(pin, pin_val); return 0; } static struct irq_chip s3c_irq_eint = { .name = "s3c-eint", .irq_mask = s3c_irq_eint_mask, .irq_unmask = s3c_irq_eint_unmask, .irq_mask_ack = s3c_irq_eint_maskack, .irq_ack = s3c_irq_eint_ack, .irq_set_type = s3c_irq_eint_set_type, .irq_set_wake = s3c_irqext_wake, }; /* s3c_irq_demux_eint * * This function demuxes the IRQ from the group0 external interrupts, * from IRQ_EINT(0) to IRQ_EINT(27). It is designed to be inlined into * the specific handlers s3c_irq_demux_eintX_Y. 
*/ static inline void s3c_irq_demux_eint(unsigned int start, unsigned int end) { u32 status = __raw_readl(S3C64XX_EINT0PEND); u32 mask = __raw_readl(S3C64XX_EINT0MASK); unsigned int irq; status &= ~mask; status >>= start; status &= (1 << (end - start + 1)) - 1; for (irq = IRQ_EINT(start); irq <= IRQ_EINT(end); irq++) { if (status & 1) generic_handle_irq(irq); status >>= 1; } } static void s3c_irq_demux_eint0_3(unsigned int irq, struct irq_desc *desc) { s3c_irq_demux_eint(0, 3); } static void s3c_irq_demux_eint4_11(unsigned int irq, struct irq_desc *desc) { s3c_irq_demux_eint(4, 11); } static void s3c_irq_demux_eint12_19(unsigned int irq, struct irq_desc *desc) { s3c_irq_demux_eint(12, 19); } static void s3c_irq_demux_eint20_27(unsigned int irq, struct irq_desc *desc) { s3c_irq_demux_eint(20, 27); } static int __init s3c64xx_init_irq_eint(void) { int irq; for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) { irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq); irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq)); set_irq_flags(irq, IRQF_VALID); } irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3); irq_set_chained_handler(IRQ_EINT4_11, s3c_irq_demux_eint4_11); irq_set_chained_handler(IRQ_EINT12_19, s3c_irq_demux_eint12_19); irq_set_chained_handler(IRQ_EINT20_27, s3c_irq_demux_eint20_27); return 0; } arch_initcall(s3c64xx_init_irq_eint); void s3c64xx_restart(char mode, const char *cmd) { if (mode != 's') arch_wdt_reset(); /* if all else fails, or mode was for soft, jump to 0 */ soft_restart(0); }
gpl-2.0
sub77-bkp/T530XXU1BOD8
arch/arm/mach-pnx4008/core.c
4735
6384
/* * arch/arm/mach-pnx4008/core.c * * PNX4008 core startup code * * Authors: Vitaly Wool, Dmitry Chigirev, * Grigory Tolstolytkin, Dmitry Pervushin <source@mvista.com> * * Based on reference code received from Philips: * Copyright (C) 2003 Philips Semiconductors * * 2005 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/serial_8250.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/system_misc.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <mach/irq.h> #include <mach/clock.h> #include <mach/dma.h> struct resource spipnx_0_resources[] = { { .start = PNX4008_SPI1_BASE, .end = PNX4008_SPI1_BASE + SZ_4K, .flags = IORESOURCE_MEM, }, { .start = PER_SPI1_REC_XMIT, .flags = IORESOURCE_DMA, }, { .start = SPI1_INT, .flags = IORESOURCE_IRQ, }, { .flags = 0, }, }; struct resource spipnx_1_resources[] = { { .start = PNX4008_SPI2_BASE, .end = PNX4008_SPI2_BASE + SZ_4K, .flags = IORESOURCE_MEM, }, { .start = PER_SPI2_REC_XMIT, .flags = IORESOURCE_DMA, }, { .start = SPI2_INT, .flags = IORESOURCE_IRQ, }, { .flags = 0, } }; static struct spi_board_info spi_board_info[] __initdata = { { .modalias = "m25p80", .max_speed_hz = 1000000, .bus_num = 1, .chip_select = 0, }, }; static struct platform_device spipnx_1 = { .name = "spipnx", .id = 1, .num_resources = ARRAY_SIZE(spipnx_0_resources), .resource = spipnx_0_resources, .dev = { .coherent_dma_mask = 0xFFFFFFFF, }, }; static struct platform_device spipnx_2 = { .name = "spipnx", 
.id = 2, .num_resources = ARRAY_SIZE(spipnx_1_resources), .resource = spipnx_1_resources, .dev = { .coherent_dma_mask = 0xFFFFFFFF, }, }; static struct plat_serial8250_port platform_serial_ports[] = { { .membase = (void *)__iomem(IO_ADDRESS(PNX4008_UART5_BASE)), .mapbase = (unsigned long)PNX4008_UART5_BASE, .irq = IIR5_INT, .uartclk = PNX4008_UART_CLK, .regshift = 2, .iotype = UPIO_MEM, .flags = UPF_BOOT_AUTOCONF | UPF_BUGGY_UART | UPF_SKIP_TEST, }, { .membase = (void *)__iomem(IO_ADDRESS(PNX4008_UART3_BASE)), .mapbase = (unsigned long)PNX4008_UART3_BASE, .irq = IIR3_INT, .uartclk = PNX4008_UART_CLK, .regshift = 2, .iotype = UPIO_MEM, .flags = UPF_BOOT_AUTOCONF | UPF_BUGGY_UART | UPF_SKIP_TEST, }, {} }; static struct platform_device serial_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = &platform_serial_ports, }, }; static struct platform_device nand_flash_device = { .name = "pnx4008-flash", .id = -1, .dev = { .coherent_dma_mask = 0xFFFFFFFF, }, }; /* The dmamask must be set for OHCI to work */ static u64 ohci_dmamask = ~(u32) 0; static struct resource ohci_resources[] = { { .start = IO_ADDRESS(PNX4008_USB_CONFIG_BASE), .end = IO_ADDRESS(PNX4008_USB_CONFIG_BASE + 0x100), .flags = IORESOURCE_MEM, }, { .start = USB_HOST_INT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device ohci_device = { .name = "pnx4008-usb-ohci", .id = -1, .dev = { .dma_mask = &ohci_dmamask, .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(ohci_resources), .resource = ohci_resources, }; static struct platform_device sdum_device = { .name = "pnx4008-sdum", .id = 0, .dev = { .coherent_dma_mask = 0xffffffff, }, }; static struct platform_device rgbfb_device = { .name = "pnx4008-rgbfb", .id = 0, .dev = { .coherent_dma_mask = 0xffffffff, } }; struct resource watchdog_resources[] = { { .start = PNX4008_WDOG_BASE, .end = PNX4008_WDOG_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device watchdog_device = { .name = 
"pnx4008-watchdog", .id = -1, .num_resources = ARRAY_SIZE(watchdog_resources), .resource = watchdog_resources, }; static struct platform_device *devices[] __initdata = { &spipnx_1, &spipnx_2, &serial_device, &ohci_device, &nand_flash_device, &sdum_device, &rgbfb_device, &watchdog_device, }; extern void pnx4008_uart_init(void); static void __init pnx4008_init(void) { /*disable all START interrupt sources, and clear all START interrupt flags */ __raw_writel(0, START_INT_ER_REG(SE_PIN_BASE_INT)); __raw_writel(0, START_INT_ER_REG(SE_INT_BASE_INT)); __raw_writel(0xffffffff, START_INT_RSR_REG(SE_PIN_BASE_INT)); __raw_writel(0xffffffff, START_INT_RSR_REG(SE_INT_BASE_INT)); platform_add_devices(devices, ARRAY_SIZE(devices)); spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info)); /* Switch on the UART clocks */ pnx4008_uart_init(); } static struct map_desc pnx4008_io_desc[] __initdata = { { .virtual = IO_ADDRESS(PNX4008_IRAM_BASE), .pfn = __phys_to_pfn(PNX4008_IRAM_BASE), .length = SZ_64K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(PNX4008_NDF_FLASH_BASE), .pfn = __phys_to_pfn(PNX4008_NDF_FLASH_BASE), .length = SZ_1M - SZ_128K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(PNX4008_JPEG_CONFIG_BASE), .pfn = __phys_to_pfn(PNX4008_JPEG_CONFIG_BASE), .length = SZ_128K * 3, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(PNX4008_DMA_CONFIG_BASE), .pfn = __phys_to_pfn(PNX4008_DMA_CONFIG_BASE), .length = SZ_1M, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(PNX4008_AHB2FAB_BASE), .pfn = __phys_to_pfn(PNX4008_AHB2FAB_BASE), .length = SZ_1M, .type = MT_DEVICE, }, }; void __init pnx4008_map_io(void) { iotable_init(pnx4008_io_desc, ARRAY_SIZE(pnx4008_io_desc)); } static void pnx4008_restart(char mode, const char *cmd) { soft_restart(0); } extern struct sys_timer pnx4008_timer; MACHINE_START(PNX4008, "Philips PNX4008") /* Maintainer: MontaVista Software Inc. 
*/ .atag_offset = 0x100, .map_io = pnx4008_map_io, .init_irq = pnx4008_init_irq, .init_machine = pnx4008_init, .timer = &pnx4008_timer, .restart = pnx4008_restart, MACHINE_END
gpl-2.0
SK4G/android_kernel_samsung_sidekick4g
arch/mips/sni/irq.c
4735
1778
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1992 Linus Torvalds * Copyright (C) 1994 - 2000 Ralf Baechle * Copyright (C) 2006 Thomas Bogendoerfer */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <asm/i8259.h> #include <asm/io.h> #include <asm/sni.h> #include <asm/irq.h> #include <asm/irq_cpu.h> void (*sni_hwint)(void); asmlinkage void plat_irq_dispatch(void) { sni_hwint(); } /* ISA irq handler */ static irqreturn_t sni_isa_irq_handler(int dummy, void *p) { int irq; irq = i8259_irq(); if (unlikely(irq < 0)) return IRQ_NONE; generic_handle_irq(irq); return IRQ_HANDLED; } struct irqaction sni_isa_irq = { .handler = sni_isa_irq_handler, .name = "ISA", .flags = IRQF_SHARED | IRQF_DISABLED }; /* * On systems with i8259-style interrupt controllers we assume for * driver compatibility reasons interrupts 0 - 15 to be the i8295 * interrupts even if the hardware uses a different interrupt numbering. */ void __init arch_init_irq(void) { init_i8259_irqs(); /* Integrated i8259 */ switch (sni_brd_type) { case SNI_BRD_10: case SNI_BRD_10NEW: case SNI_BRD_TOWER_OASIC: case SNI_BRD_MINITOWER: sni_a20r_irq_init(); break; case SNI_BRD_PCI_TOWER: sni_pcit_irq_init(); break; case SNI_BRD_PCI_TOWER_CPLUS: sni_pcit_cplus_irq_init(); break; case SNI_BRD_RM200: sni_rm200_irq_init(); break; case SNI_BRD_PCI_MTOWER: case SNI_BRD_PCI_DESKTOP: case SNI_BRD_PCI_MTOWER_CPLUS: sni_pcimt_irq_init(); break; } }
gpl-2.0
dr87/G2-L-KERNEL
arch/arm/mach-sa1100/badge4.c
4735
7463
/* * linux/arch/arm/mach-sa1100/badge4.c * * BadgePAD 4 specific initialization * * Tim Connors <connors@hpl.hp.com> * Christopher Hoover <ch@hpl.hp.com> * * Copyright (C) 2002 Hewlett-Packard Company * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/tty.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/errno.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/setup.h> #include <mach/irqs.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/map.h> #include <asm/hardware/sa1111.h> #include <asm/mach/serial_sa1100.h> #include <mach/badge4.h> #include "generic.h" static struct resource sa1111_resources[] = { [0] = DEFINE_RES_MEM(BADGE4_SA1111_BASE, 0x2000), [1] = DEFINE_RES_IRQ(BADGE4_IRQ_GPIO_SA1111), }; static int badge4_sa1111_enable(void *data, unsigned devid) { if (devid == SA1111_DEVID_USB) badge4_set_5V(BADGE4_5V_USB, 1); return 0; } static void badge4_sa1111_disable(void *data, unsigned devid) { if (devid == SA1111_DEVID_USB) badge4_set_5V(BADGE4_5V_USB, 0); } static struct sa1111_platform_data sa1111_info = { .disable_devs = SA1111_DEVID_PS2_MSE, .enable = badge4_sa1111_enable, .disable = badge4_sa1111_disable, }; static u64 sa1111_dmamask = 0xffffffffUL; static struct platform_device sa1111_device = { .name = "sa1111", .id = 0, .dev = { .dma_mask = &sa1111_dmamask, .coherent_dma_mask = 0xffffffff, .platform_data = &sa1111_info, }, .num_resources = ARRAY_SIZE(sa1111_resources), .resource = sa1111_resources, }; static struct platform_device *devices[] __initdata = { &sa1111_device, }; static int __init badge4_sa1111_init(void) { /* * Ensure that the memory bus request/grant signals are setup, * and 
the grant is held in its inactive state */ sa1110_mb_disable(); /* * Probe for SA1111. */ return platform_add_devices(devices, ARRAY_SIZE(devices)); } /* * 1 x Intel 28F320C3 Advanced+ Boot Block Flash (32 Mi bit) * Eight 4 KiW Parameter Bottom Blocks (64 KiB) * Sixty-three 32 KiW Main Blocks (4032 Ki b) * * <or> * * 1 x Intel 28F640C3 Advanced+ Boot Block Flash (64 Mi bit) * Eight 4 KiW Parameter Bottom Blocks (64 KiB) * One-hundred-twenty-seven 32 KiW Main Blocks (8128 Ki b) */ static struct mtd_partition badge4_partitions[] = { { .name = "BLOB boot loader", .offset = 0, .size = 0x0000A000 }, { .name = "params", .offset = MTDPART_OFS_APPEND, .size = 0x00006000 }, { .name = "root", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL } }; static struct flash_platform_data badge4_flash_data = { .map_name = "cfi_probe", .parts = badge4_partitions, .nr_parts = ARRAY_SIZE(badge4_partitions), }; static struct resource badge4_flash_resource = DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_64M); static int five_v_on __initdata = 0; static int __init five_v_on_setup(char *ignore) { five_v_on = 1; return 1; } __setup("five_v_on", five_v_on_setup); static int __init badge4_init(void) { int ret; if (!machine_is_badge4()) return -ENODEV; /* LCD */ GPCR = (BADGE4_GPIO_LGP2 | BADGE4_GPIO_LGP3 | BADGE4_GPIO_LGP4 | BADGE4_GPIO_LGP5 | BADGE4_GPIO_LGP6 | BADGE4_GPIO_LGP7 | BADGE4_GPIO_LGP8 | BADGE4_GPIO_LGP9 | BADGE4_GPIO_GPA_VID | BADGE4_GPIO_GPB_VID | BADGE4_GPIO_GPC_VID); GPDR &= ~BADGE4_GPIO_INT_VID; GPDR |= (BADGE4_GPIO_LGP2 | BADGE4_GPIO_LGP3 | BADGE4_GPIO_LGP4 | BADGE4_GPIO_LGP5 | BADGE4_GPIO_LGP6 | BADGE4_GPIO_LGP7 | BADGE4_GPIO_LGP8 | BADGE4_GPIO_LGP9 | BADGE4_GPIO_GPA_VID | BADGE4_GPIO_GPB_VID | BADGE4_GPIO_GPC_VID); /* SDRAM SPD i2c */ GPCR = (BADGE4_GPIO_SDSDA | BADGE4_GPIO_SDSCL); GPDR |= (BADGE4_GPIO_SDSDA | BADGE4_GPIO_SDSCL); /* uart */ GPCR = (BADGE4_GPIO_UART_HS1 | BADGE4_GPIO_UART_HS2); GPDR |= (BADGE4_GPIO_UART_HS1 | BADGE4_GPIO_UART_HS2); /* CPLD muxsel0 input for 
mux/adc chip select */ GPCR = BADGE4_GPIO_MUXSEL0; GPDR |= BADGE4_GPIO_MUXSEL0; /* test points: J5, J6 as inputs, J7 outputs */ GPDR &= ~(BADGE4_GPIO_TESTPT_J5 | BADGE4_GPIO_TESTPT_J6); GPCR = BADGE4_GPIO_TESTPT_J7; GPDR |= BADGE4_GPIO_TESTPT_J7; /* 5V supply rail. */ GPCR = BADGE4_GPIO_PCMEN5V; /* initially off */ GPDR |= BADGE4_GPIO_PCMEN5V; /* CPLD sdram type inputs; set up by blob */ //GPDR |= (BADGE4_GPIO_SDTYP1 | BADGE4_GPIO_SDTYP0); printk(KERN_DEBUG __FILE__ ": SDRAM CPLD typ1=%d typ0=%d\n", !!(GPLR & BADGE4_GPIO_SDTYP1), !!(GPLR & BADGE4_GPIO_SDTYP0)); /* SA1111 reset pin; set up by blob */ //GPSR = BADGE4_GPIO_SA1111_NRST; //GPDR |= BADGE4_GPIO_SA1111_NRST; /* power management cruft */ PGSR = 0; PWER = 0; PCFR = 0; PSDR = 0; PWER |= PWER_GPIO26; /* wake up on an edge from TESTPT_J5 */ PWER |= PWER_RTC; /* wake up if rtc fires */ /* drive sa1111_nrst during sleep */ PGSR |= BADGE4_GPIO_SA1111_NRST; /* drive CPLD as is during sleep */ PGSR |= (GPLR & (BADGE4_GPIO_SDTYP0|BADGE4_GPIO_SDTYP1)); /* Now bring up the SA-1111. 
*/ ret = badge4_sa1111_init(); if (ret < 0) printk(KERN_ERR "%s: SA-1111 initialization failed (%d)\n", __func__, ret); /* maybe turn on 5v0 from the start */ badge4_set_5V(BADGE4_5V_INITIALLY, five_v_on); sa11x0_register_mtd(&badge4_flash_data, &badge4_flash_resource, 1); return 0; } arch_initcall(badge4_init); static unsigned badge4_5V_bitmap = 0; void badge4_set_5V(unsigned subsystem, int on) { unsigned long flags; unsigned old_5V_bitmap; local_irq_save(flags); old_5V_bitmap = badge4_5V_bitmap; if (on) { badge4_5V_bitmap |= subsystem; } else { badge4_5V_bitmap &= ~subsystem; } /* detect on->off and off->on transitions */ if ((!old_5V_bitmap) && (badge4_5V_bitmap)) { /* was off, now on */ printk(KERN_INFO "%s: enabling 5V supply rail\n", __func__); GPSR = BADGE4_GPIO_PCMEN5V; } else if ((old_5V_bitmap) && (!badge4_5V_bitmap)) { /* was on, now off */ printk(KERN_INFO "%s: disabling 5V supply rail\n", __func__); GPCR = BADGE4_GPIO_PCMEN5V; } local_irq_restore(flags); } EXPORT_SYMBOL(badge4_set_5V); static struct map_desc badge4_io_desc[] __initdata = { { /* SRAM bank 1 */ .virtual = 0xf1000000, .pfn = __phys_to_pfn(0x08000000), .length = 0x00100000, .type = MT_DEVICE }, { /* SRAM bank 2 */ .virtual = 0xf2000000, .pfn = __phys_to_pfn(0x10000000), .length = 0x00100000, .type = MT_DEVICE } }; static void badge4_uart_pm(struct uart_port *port, u_int state, u_int oldstate) { if (!state) { Ser1SDCR0 |= SDCR0_UART; } } static struct sa1100_port_fns badge4_port_fns __initdata = { //.get_mctrl = badge4_get_mctrl, //.set_mctrl = badge4_set_mctrl, .pm = badge4_uart_pm, }; static void __init badge4_map_io(void) { sa1100_map_io(); iotable_init(badge4_io_desc, ARRAY_SIZE(badge4_io_desc)); sa1100_register_uart_fns(&badge4_port_fns); sa1100_register_uart(0, 3); sa1100_register_uart(1, 1); } MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4") .atag_offset = 0x100, .map_io = badge4_map_io, .nr_irqs = SA1100_NR_IRQS, .init_irq = sa1100_init_irq, .timer = &sa1100_timer, 
#ifdef CONFIG_SA1111 .dma_zone_size = SZ_1M, #endif .restart = sa11x0_restart, MACHINE_END
gpl-2.0
InnoSum/linux-sunxi
arch/parisc/kernel/setup.c
4991
9929
/* * Initial setup-routines for HP 9000 based hardware. * * Copyright (C) 1991, 1992, 1995 Linus Torvalds * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de> * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf) * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net> * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org> * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net> * * Initial PA-RISC Version: 04-23-1999 by Helge Deller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #include <linux/kernel.h> #include <linux/initrd.h> #include <linux/init.h> #include <linux/console.h> #include <linux/seq_file.h> #define PCI_DEBUG #include <linux/pci.h> #undef PCI_DEBUG #include <linux/proc_fs.h> #include <linux/export.h> #include <asm/processor.h> #include <asm/pdc.h> #include <asm/led.h> #include <asm/machdep.h> /* for pa7300lc_init() proto */ #include <asm/pdc_chassis.h> #include <asm/io.h> #include <asm/setup.h> #include <asm/unwind.h> static char __initdata command_line[COMMAND_LINE_SIZE]; /* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */ struct proc_dir_entry * proc_runway_root __read_mostly = NULL; struct proc_dir_entry * proc_gsc_root __read_mostly = NULL; struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL; #if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA)) int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */ EXPORT_SYMBOL(parisc_bus_is_phys); #endif void __init setup_cmdline(char **cmdline_p) { extern unsigned int boot_args[]; /* Collect stuff passed in from the boot loader */ /* boot_args[0] is free-mem start, boot_args[1] is ptr to command line */ if (boot_args[0] < 64) { /* called from hpux boot loader */ boot_command_line[0] = '\0'; } else { strcpy(boot_command_line, (char *)__va(boot_args[1])); #ifdef CONFIG_BLK_DEV_INITRD if (boot_args[2] != 0) /* did palo pass us a ramdisk? */ { initrd_start = (unsigned long)__va(boot_args[2]); initrd_end = (unsigned long)__va(boot_args[3]); } #endif } strcpy(command_line, boot_command_line); *cmdline_p = command_line; } #ifdef CONFIG_PA11 void __init dma_ops_init(void) { switch (boot_cpu_data.cpu_type) { case pcx: /* * We've got way too many dependencies on 1.1 semantics * to support 1.0 boxes at this point. 
*/ panic( "PA-RISC Linux currently only supports machines that conform to\n" "the PA-RISC 1.1 or 2.0 architecture specification.\n"); case pcxs: case pcxt: hppa_dma_ops = &pcx_dma_ops; break; case pcxl2: pa7300lc_init(); case pcxl: /* falls through */ hppa_dma_ops = &pcxl_dma_ops; break; default: break; } } #endif extern int init_per_cpu(int cpuid); extern void collect_boot_cpu_data(void); void __init setup_arch(char **cmdline_p) { #ifdef CONFIG_64BIT extern int parisc_narrow_firmware; #endif unwind_init(); init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */ #ifdef CONFIG_64BIT printk(KERN_INFO "The 64-bit Kernel has started...\n"); #else printk(KERN_INFO "The 32-bit Kernel has started...\n"); #endif pdc_console_init(); #ifdef CONFIG_64BIT if(parisc_narrow_firmware) { printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n"); } #endif setup_pdc(); setup_cmdline(cmdline_p); collect_boot_cpu_data(); do_memory_inventory(); /* probe for physical memory */ parisc_cache_init(); paging_init(); #ifdef CONFIG_CHASSIS_LCD_LED /* initialize the LCD/LED after boot_cpu_data is available ! */ led_init(); /* LCD/LED initialization */ #endif #ifdef CONFIG_PA11 dma_ops_init(); #endif #if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; /* we use take_over_console() later ! */ #endif } /* * Display CPU info for all CPUs. * for parisc this is in processor.c */ extern int show_cpuinfo (struct seq_file *m, void *v); static void * c_start (struct seq_file *m, loff_t *pos) { /* Looks like the caller will call repeatedly until we return * 0, signaling EOF perhaps. This could be used to sequence * through CPUs for example. Since we print all cpu info in our * show_cpuinfo() disregarding 'pos' (which I assume is 'v' above) * we only allow for one "position". */ return ((long)*pos < 1) ? 
(void *)1 : NULL; } static void * c_next (struct seq_file *m, void *v, loff_t *pos) { ++*pos; return c_start(m, pos); } static void c_stop (struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo }; static void __init parisc_proc_mkdir(void) { /* ** Can't call proc_mkdir() until after proc_root_init() has been ** called by start_kernel(). In other words, this code can't ** live in arch/.../setup.c because start_parisc() calls ** start_kernel(). */ switch (boot_cpu_data.cpu_type) { case pcxl: case pcxl2: if (NULL == proc_gsc_root) { proc_gsc_root = proc_mkdir("bus/gsc", NULL); } break; case pcxt_: case pcxu: case pcxu_: case pcxw: case pcxw_: case pcxw2: if (NULL == proc_runway_root) { proc_runway_root = proc_mkdir("bus/runway", NULL); } break; case mako: case mako2: if (NULL == proc_mckinley_root) { proc_mckinley_root = proc_mkdir("bus/mckinley", NULL); } break; default: /* FIXME: this was added to prevent the compiler * complaining about missing pcx, pcxs and pcxt * I'm assuming they have neither gsc nor runway */ break; } } static struct resource central_bus = { .name = "Central Bus", .start = F_EXTEND(0xfff80000), .end = F_EXTEND(0xfffaffff), .flags = IORESOURCE_MEM, }; static struct resource local_broadcast = { .name = "Local Broadcast", .start = F_EXTEND(0xfffb0000), .end = F_EXTEND(0xfffdffff), .flags = IORESOURCE_MEM, }; static struct resource global_broadcast = { .name = "Global Broadcast", .start = F_EXTEND(0xfffe0000), .end = F_EXTEND(0xffffffff), .flags = IORESOURCE_MEM, }; static int __init parisc_init_resources(void) { int result; result = request_resource(&iomem_resource, &central_bus); if (result < 0) { printk(KERN_ERR "%s: failed to claim %s address space!\n", __FILE__, central_bus.name); return result; } result = request_resource(&iomem_resource, &local_broadcast); if (result < 0) { printk(KERN_ERR "%s: failed to claim %saddress space!\n", __FILE__, 
local_broadcast.name); return result; } result = request_resource(&iomem_resource, &global_broadcast); if (result < 0) { printk(KERN_ERR "%s: failed to claim %s address space!\n", __FILE__, global_broadcast.name); return result; } return 0; } extern void gsc_init(void); extern void processor_init(void); extern void ccio_init(void); extern void hppb_init(void); extern void dino_init(void); extern void iosapic_init(void); extern void lba_init(void); extern void sba_init(void); extern void eisa_init(void); static int __init parisc_init(void) { u32 osid = (OS_ID_LINUX << 16); parisc_proc_mkdir(); parisc_init_resources(); do_device_inventory(); /* probe for hardware */ parisc_pdc_chassis_init(); /* set up a new led state on systems shipped LED State panel */ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART); /* tell PDC we're Linux. Nevermind failure. */ pdc_stable_write(0x40, &osid, sizeof(osid)); processor_init(); printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n", num_present_cpus(), boot_cpu_data.cpu_name, boot_cpu_data.cpu_hz / 1000000, boot_cpu_data.cpu_hz % 1000000 ); parisc_setup_cache_timing(); /* These are in a non-obvious order, will fix when we have an iotree */ #if defined(CONFIG_IOSAPIC) iosapic_init(); #endif #if defined(CONFIG_IOMMU_SBA) sba_init(); #endif #if defined(CONFIG_PCI_LBA) lba_init(); #endif /* CCIO before any potential subdevices */ #if defined(CONFIG_IOMMU_CCIO) ccio_init(); #endif /* * Need to register Asp & Wax before the EISA adapters for the IRQ * regions. EISA must come before PCI to be sure it gets IRQ region * 0. 
*/ #if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX) gsc_init(); #endif #ifdef CONFIG_EISA eisa_init(); #endif #if defined(CONFIG_HPPB) hppb_init(); #endif #if defined(CONFIG_GSC_DINO) dino_init(); #endif #ifdef CONFIG_CHASSIS_LCD_LED register_led_regions(); /* register LED port info in procfs */ #endif return 0; } arch_initcall(parisc_init); void start_parisc(void) { extern void start_kernel(void); int ret, cpunum; struct pdc_coproc_cfg coproc_cfg; cpunum = smp_processor_id(); set_firmware_width_unlocked(); ret = pdc_coproc_cfg_unlocked(&coproc_cfg); if (ret >= 0 && coproc_cfg.ccr_functional) { mtctl(coproc_cfg.ccr_functional, 10); per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; asm volatile ("fstd %fr0,8(%sp)"); } else { panic("must have an fpu to boot linux"); } start_kernel(); // not reached }
gpl-2.0
samuaz/kernel_msm_gee
drivers/media/dvb/dvb-usb/af9005.c
7295
27677
/* DVB USB compliant Linux driver for the Afatech 9005 * USB1.1 DVB-T receiver. * * Copyright (C) 2007 Luca Olivetti (luca@ventoso.org) * * Thanks to Afatech who kindly provided information. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * see Documentation/dvb/README.dvb-usb for more information */ #include "af9005.h" /* debug */ int dvb_usb_af9005_debug; module_param_named(debug, dvb_usb_af9005_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4,reg=8,i2c=16,fw=32 (or-able))." 
DVB_USB_DEBUG_STATUS); /* enable obnoxious led */ bool dvb_usb_af9005_led = 1; module_param_named(led, dvb_usb_af9005_led, bool, 0644); MODULE_PARM_DESC(led, "enable led (default: 1)."); /* eeprom dump */ static int dvb_usb_af9005_dump_eeprom; module_param_named(dump_eeprom, dvb_usb_af9005_dump_eeprom, int, 0); MODULE_PARM_DESC(dump_eeprom, "dump contents of the eeprom."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); /* remote control decoder */ static int (*rc_decode) (struct dvb_usb_device *d, u8 *data, int len, u32 *event, int *state); static void *rc_keys; static int *rc_keys_size; u8 regmask[8] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff }; struct af9005_device_state { u8 sequence; int led_state; }; static int af9005_generic_read_write(struct dvb_usb_device *d, u16 reg, int readwrite, int type, u8 * values, int len) { struct af9005_device_state *st = d->priv; u8 obuf[16] = { 0 }; u8 ibuf[17] = { 0 }; u8 command; int i; int ret; if (len < 1) { err("generic read/write, less than 1 byte. Makes no sense."); return -EINVAL; } if (len > 8) { err("generic read/write, more than 8 bytes. 
Not supported."); return -EINVAL; } obuf[0] = 14; /* rest of buffer length low */ obuf[1] = 0; /* rest of buffer length high */ obuf[2] = AF9005_REGISTER_RW; /* register operation */ obuf[3] = 12; /* rest of buffer length */ obuf[4] = st->sequence++; /* sequence number */ obuf[5] = (u8) (reg >> 8); /* register address */ obuf[6] = (u8) (reg & 0xff); if (type == AF9005_OFDM_REG) { command = AF9005_CMD_OFDM_REG; } else { command = AF9005_CMD_TUNER; } if (len > 1) command |= AF9005_CMD_BURST | AF9005_CMD_AUTOINC | (len - 1) << 3; command |= readwrite; if (readwrite == AF9005_CMD_WRITE) for (i = 0; i < len; i++) obuf[8 + i] = values[i]; else if (type == AF9005_TUNER_REG) /* read command for tuner, the first byte contains the i2c address */ obuf[8] = values[0]; obuf[7] = command; ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 17, 0); if (ret) return ret; /* sanity check */ if (ibuf[2] != AF9005_REGISTER_RW_ACK) { err("generic read/write, wrong reply code."); return -EIO; } if (ibuf[3] != 0x0d) { err("generic read/write, wrong length in reply."); return -EIO; } if (ibuf[4] != obuf[4]) { err("generic read/write, wrong sequence in reply."); return -EIO; } /* Windows driver doesn't check these fields, in fact sometimes the register in the reply is different that what has been sent if (ibuf[5] != obuf[5] || ibuf[6] != obuf[6]) { err("generic read/write, wrong register in reply."); return -EIO; } if (ibuf[7] != command) { err("generic read/write wrong command in reply."); return -EIO; } */ if (ibuf[16] != 0x01) { err("generic read/write wrong status code in reply."); return -EIO; } if (readwrite == AF9005_CMD_READ) for (i = 0; i < len; i++) values[i] = ibuf[8 + i]; return 0; } int af9005_read_ofdm_register(struct dvb_usb_device *d, u16 reg, u8 * value) { int ret; deb_reg("read register %x ", reg); ret = af9005_generic_read_write(d, reg, AF9005_CMD_READ, AF9005_OFDM_REG, value, 1); if (ret) deb_reg("failed\n"); else deb_reg("value %x\n", *value); return ret; } int 
af9005_read_ofdm_registers(struct dvb_usb_device *d, u16 reg, u8 * values, int len) { int ret; deb_reg("read %d registers %x ", len, reg); ret = af9005_generic_read_write(d, reg, AF9005_CMD_READ, AF9005_OFDM_REG, values, len); if (ret) deb_reg("failed\n"); else debug_dump(values, len, deb_reg); return ret; } int af9005_write_ofdm_register(struct dvb_usb_device *d, u16 reg, u8 value) { int ret; u8 temp = value; deb_reg("write register %x value %x ", reg, value); ret = af9005_generic_read_write(d, reg, AF9005_CMD_WRITE, AF9005_OFDM_REG, &temp, 1); if (ret) deb_reg("failed\n"); else deb_reg("ok\n"); return ret; } int af9005_write_ofdm_registers(struct dvb_usb_device *d, u16 reg, u8 * values, int len) { int ret; deb_reg("write %d registers %x values ", len, reg); debug_dump(values, len, deb_reg); ret = af9005_generic_read_write(d, reg, AF9005_CMD_WRITE, AF9005_OFDM_REG, values, len); if (ret) deb_reg("failed\n"); else deb_reg("ok\n"); return ret; } int af9005_read_register_bits(struct dvb_usb_device *d, u16 reg, u8 pos, u8 len, u8 * value) { u8 temp; int ret; deb_reg("read bits %x %x %x", reg, pos, len); ret = af9005_read_ofdm_register(d, reg, &temp); if (ret) { deb_reg(" failed\n"); return ret; } *value = (temp >> pos) & regmask[len - 1]; deb_reg(" value %x\n", *value); return 0; } int af9005_write_register_bits(struct dvb_usb_device *d, u16 reg, u8 pos, u8 len, u8 value) { u8 temp, mask; int ret; deb_reg("write bits %x %x %x value %x\n", reg, pos, len, value); if (pos == 0 && len == 8) return af9005_write_ofdm_register(d, reg, value); ret = af9005_read_ofdm_register(d, reg, &temp); if (ret) return ret; mask = regmask[len - 1] << pos; temp = (temp & ~mask) | ((value << pos) & mask); return af9005_write_ofdm_register(d, reg, temp); } static int af9005_usb_read_tuner_registers(struct dvb_usb_device *d, u16 reg, u8 * values, int len) { return af9005_generic_read_write(d, reg, AF9005_CMD_READ, AF9005_TUNER_REG, values, len); } static int 
af9005_usb_write_tuner_registers(struct dvb_usb_device *d, u16 reg, u8 * values, int len) { return af9005_generic_read_write(d, reg, AF9005_CMD_WRITE, AF9005_TUNER_REG, values, len); } int af9005_write_tuner_registers(struct dvb_usb_device *d, u16 reg, u8 * values, int len) { /* don't let the name of this function mislead you: it's just used as an interface from the firmware to the i2c bus. The actual i2c addresses are contained in the data */ int ret, i, done = 0, fail = 0; u8 temp; ret = af9005_usb_write_tuner_registers(d, reg, values, len); if (ret) return ret; if (reg != 0xffff) { /* check if write done (0xa40d bit 1) or fail (0xa40d bit 2) */ for (i = 0; i < 200; i++) { ret = af9005_read_ofdm_register(d, xd_I2C_i2c_m_status_wdat_done, &temp); if (ret) return ret; done = temp & (regmask[i2c_m_status_wdat_done_len - 1] << i2c_m_status_wdat_done_pos); if (done) break; fail = temp & (regmask[i2c_m_status_wdat_fail_len - 1] << i2c_m_status_wdat_fail_pos); if (fail) break; msleep(50); } if (i == 200) return -ETIMEDOUT; if (fail) { /* clear write fail bit */ af9005_write_register_bits(d, xd_I2C_i2c_m_status_wdat_fail, i2c_m_status_wdat_fail_pos, i2c_m_status_wdat_fail_len, 1); return -EIO; } /* clear write done bit */ ret = af9005_write_register_bits(d, xd_I2C_i2c_m_status_wdat_fail, i2c_m_status_wdat_done_pos, i2c_m_status_wdat_done_len, 1); if (ret) return ret; } return 0; } int af9005_read_tuner_registers(struct dvb_usb_device *d, u16 reg, u8 addr, u8 * values, int len) { /* don't let the name of this function mislead you: it's just used as an interface from the firmware to the i2c bus. 
The actual i2c addresses are contained in the data */ int ret, i; u8 temp, buf[2]; buf[0] = addr; /* tuner i2c address */ buf[1] = values[0]; /* tuner register */ values[0] = addr + 0x01; /* i2c read address */ if (reg == APO_REG_I2C_RW_SILICON_TUNER) { /* write tuner i2c address to tuner, 0c00c0 undocumented, found by sniffing */ ret = af9005_write_tuner_registers(d, 0x00c0, buf, 2); if (ret) return ret; } /* send read command to ofsm */ ret = af9005_usb_read_tuner_registers(d, reg, values, 1); if (ret) return ret; /* check if read done */ for (i = 0; i < 200; i++) { ret = af9005_read_ofdm_register(d, 0xa408, &temp); if (ret) return ret; if (temp & 0x01) break; msleep(50); } if (i == 200) return -ETIMEDOUT; /* clear read done bit (by writing 1) */ ret = af9005_write_ofdm_register(d, xd_I2C_i2c_m_data8, 1); if (ret) return ret; /* get read data (available from 0xa400) */ for (i = 0; i < len; i++) { ret = af9005_read_ofdm_register(d, 0xa400 + i, &temp); if (ret) return ret; values[i] = temp; } return 0; } static int af9005_i2c_write(struct dvb_usb_device *d, u8 i2caddr, u8 reg, u8 * data, int len) { int ret, i; u8 buf[3]; deb_i2c("i2c_write i2caddr %x, reg %x, len %d data ", i2caddr, reg, len); debug_dump(data, len, deb_i2c); for (i = 0; i < len; i++) { buf[0] = i2caddr; buf[1] = reg + (u8) i; buf[2] = data[i]; ret = af9005_write_tuner_registers(d, APO_REG_I2C_RW_SILICON_TUNER, buf, 3); if (ret) { deb_i2c("i2c_write failed\n"); return ret; } } deb_i2c("i2c_write ok\n"); return 0; } static int af9005_i2c_read(struct dvb_usb_device *d, u8 i2caddr, u8 reg, u8 * data, int len) { int ret, i; u8 temp; deb_i2c("i2c_read i2caddr %x, reg %x, len %d\n ", i2caddr, reg, len); for (i = 0; i < len; i++) { temp = reg + i; ret = af9005_read_tuner_registers(d, APO_REG_I2C_RW_SILICON_TUNER, i2caddr, &temp, 1); if (ret) { deb_i2c("i2c_read failed\n"); return ret; } data[i] = temp; } deb_i2c("i2c data read: "); debug_dump(data, len, deb_i2c); return 0; } static int 
af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { /* only implements what the mt2060 module does, don't know how to make it really generic */ struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret; u8 reg, addr; u8 *value; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; if (num > 2) warn("more than 2 i2c messages at a time is not handled yet. TODO."); if (num == 2) { /* reads a single register */ reg = *msg[0].buf; addr = msg[0].addr; value = msg[1].buf; ret = af9005_i2c_read(d, addr, reg, value, 1); if (ret == 0) ret = 2; } else { /* write one or more registers */ reg = msg[0].buf[0]; addr = msg[0].addr; value = &msg[0].buf[1]; ret = af9005_i2c_write(d, addr, reg, value, msg[0].len - 1); if (ret == 0) ret = 1; } mutex_unlock(&d->i2c_mutex); return ret; } static u32 af9005_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm af9005_i2c_algo = { .master_xfer = af9005_i2c_xfer, .functionality = af9005_i2c_func, }; int af9005_send_command(struct dvb_usb_device *d, u8 command, u8 * wbuf, int wlen, u8 * rbuf, int rlen) { struct af9005_device_state *st = d->priv; int ret, i, packet_len; u8 buf[64]; u8 ibuf[64]; if (wlen < 0) { err("send command, wlen less than 0 bytes. Makes no sense."); return -EINVAL; } if (wlen > 54) { err("send command, wlen more than 54 bytes. Not supported."); return -EINVAL; } if (rlen > 54) { err("send command, rlen more than 54 bytes. 
Not supported."); return -EINVAL; } packet_len = wlen + 5; buf[0] = (u8) (packet_len & 0xff); buf[1] = (u8) ((packet_len & 0xff00) >> 8); buf[2] = 0x26; /* packet type */ buf[3] = wlen + 3; buf[4] = st->sequence++; buf[5] = command; buf[6] = wlen; for (i = 0; i < wlen; i++) buf[7 + i] = wbuf[i]; ret = dvb_usb_generic_rw(d, buf, wlen + 7, ibuf, rlen + 7, 0); if (ret) return ret; if (ibuf[2] != 0x27) { err("send command, wrong reply code."); return -EIO; } if (ibuf[4] != buf[4]) { err("send command, wrong sequence in reply."); return -EIO; } if (ibuf[5] != 0x01) { err("send command, wrong status code in reply."); return -EIO; } if (ibuf[6] != rlen) { err("send command, invalid data length in reply."); return -EIO; } for (i = 0; i < rlen; i++) rbuf[i] = ibuf[i + 7]; return 0; } int af9005_read_eeprom(struct dvb_usb_device *d, u8 address, u8 * values, int len) { struct af9005_device_state *st = d->priv; u8 obuf[16], ibuf[14]; int ret, i; memset(obuf, 0, sizeof(obuf)); memset(ibuf, 0, sizeof(ibuf)); obuf[0] = 14; /* length of rest of packet low */ obuf[1] = 0; /* length of rest of packer high */ obuf[2] = 0x2a; /* read/write eeprom */ obuf[3] = 12; /* size */ obuf[4] = st->sequence++; obuf[5] = 0; /* read */ obuf[6] = len; obuf[7] = address; ret = dvb_usb_generic_rw(d, obuf, 16, ibuf, 14, 0); if (ret) return ret; if (ibuf[2] != 0x2b) { err("Read eeprom, invalid reply code"); return -EIO; } if (ibuf[3] != 10) { err("Read eeprom, invalid reply length"); return -EIO; } if (ibuf[4] != obuf[4]) { err("Read eeprom, wrong sequence in reply "); return -EIO; } if (ibuf[5] != 1) { err("Read eeprom, wrong status in reply "); return -EIO; } for (i = 0; i < len; i++) { values[i] = ibuf[6 + i]; } return 0; } static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply) { u8 buf[FW_BULKOUT_SIZE + 2]; u16 checksum; int act_len, i, ret; memset(buf, 0, sizeof(buf)); buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff); switch (type) { case 
FW_CONFIG: buf[2] = 0x11; buf[3] = 0x04; buf[4] = 0x00; /* sequence number, original driver doesn't increment it here */ buf[5] = 0x03; checksum = buf[4] + buf[5]; buf[6] = (u8) ((checksum >> 8) & 0xff); buf[7] = (u8) (checksum & 0xff); break; case FW_CONFIRM: buf[2] = 0x11; buf[3] = 0x04; buf[4] = 0x00; /* sequence number, original driver doesn't increment it here */ buf[5] = 0x01; checksum = buf[4] + buf[5]; buf[6] = (u8) ((checksum >> 8) & 0xff); buf[7] = (u8) (checksum & 0xff); break; case FW_BOOT: buf[2] = 0x10; buf[3] = 0x08; buf[4] = 0x00; /* sequence number, original driver doesn't increment it here */ buf[5] = 0x97; buf[6] = 0xaa; buf[7] = 0x55; buf[8] = 0xa5; buf[9] = 0x5a; checksum = 0; for (i = 4; i <= 9; i++) checksum += buf[i]; buf[10] = (u8) ((checksum >> 8) & 0xff); buf[11] = (u8) (checksum & 0xff); break; default: err("boot packet invalid boot packet type"); return -EINVAL; } deb_fw(">>> "); debug_dump(buf, FW_BULKOUT_SIZE + 2, deb_fw); ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x02), buf, FW_BULKOUT_SIZE + 2, &act_len, 2000); if (ret) err("boot packet bulk message failed: %d (%d/%d)", ret, FW_BULKOUT_SIZE + 2, act_len); else ret = act_len != FW_BULKOUT_SIZE + 2 ? 
-1 : 0; if (ret) return ret; memset(buf, 0, 9); ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 0x01), buf, 9, &act_len, 2000); if (ret) { err("boot packet recv bulk message failed: %d", ret); return ret; } deb_fw("<<< "); debug_dump(buf, act_len, deb_fw); checksum = 0; switch (type) { case FW_CONFIG: if (buf[2] != 0x11) { err("boot bad config header."); return -EIO; } if (buf[3] != 0x05) { err("boot bad config size."); return -EIO; } if (buf[4] != 0x00) { err("boot bad config sequence."); return -EIO; } if (buf[5] != 0x04) { err("boot bad config subtype."); return -EIO; } for (i = 4; i <= 6; i++) checksum += buf[i]; if (buf[7] * 256 + buf[8] != checksum) { err("boot bad config checksum."); return -EIO; } *reply = buf[6]; break; case FW_CONFIRM: if (buf[2] != 0x11) { err("boot bad confirm header."); return -EIO; } if (buf[3] != 0x05) { err("boot bad confirm size."); return -EIO; } if (buf[4] != 0x00) { err("boot bad confirm sequence."); return -EIO; } if (buf[5] != 0x02) { err("boot bad confirm subtype."); return -EIO; } for (i = 4; i <= 6; i++) checksum += buf[i]; if (buf[7] * 256 + buf[8] != checksum) { err("boot bad confirm checksum."); return -EIO; } *reply = buf[6]; break; case FW_BOOT: if (buf[2] != 0x10) { err("boot bad boot header."); return -EIO; } if (buf[3] != 0x05) { err("boot bad boot size."); return -EIO; } if (buf[4] != 0x00) { err("boot bad boot sequence."); return -EIO; } if (buf[5] != 0x01) { err("boot bad boot pattern 01."); return -EIO; } if (buf[6] != 0x10) { err("boot bad boot pattern 10."); return -EIO; } for (i = 4; i <= 6; i++) checksum += buf[i]; if (buf[7] * 256 + buf[8] != checksum) { err("boot bad boot checksum."); return -EIO; } break; } return 0; } static int af9005_download_firmware(struct usb_device *udev, const struct firmware *fw) { int i, packets, ret, act_len; u8 buf[FW_BULKOUT_SIZE + 2]; u8 reply; ret = af9005_boot_packet(udev, FW_CONFIG, &reply); if (ret) return ret; if (reply != 0x01) { err("before downloading firmware, 
FW_CONFIG expected 0x01, received 0x%x", reply); return -EIO; } packets = fw->size / FW_BULKOUT_SIZE; buf[0] = (u8) (FW_BULKOUT_SIZE & 0xff); buf[1] = (u8) ((FW_BULKOUT_SIZE >> 8) & 0xff); for (i = 0; i < packets; i++) { memcpy(&buf[2], fw->data + i * FW_BULKOUT_SIZE, FW_BULKOUT_SIZE); deb_fw(">>> "); debug_dump(buf, FW_BULKOUT_SIZE + 2, deb_fw); ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x02), buf, FW_BULKOUT_SIZE + 2, &act_len, 1000); if (ret) { err("firmware download failed at packet %d with code %d", i, ret); return ret; } } ret = af9005_boot_packet(udev, FW_CONFIRM, &reply); if (ret) return ret; if (reply != (u8) (packets & 0xff)) { err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply); return -EIO; } ret = af9005_boot_packet(udev, FW_BOOT, &reply); if (ret) return ret; ret = af9005_boot_packet(udev, FW_CONFIG, &reply); if (ret) return ret; if (reply != 0x02) { err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply); return -EIO; } return 0; } int af9005_led_control(struct dvb_usb_device *d, int onoff) { struct af9005_device_state *st = d->priv; int temp, ret; if (onoff && dvb_usb_af9005_led) temp = 1; else temp = 0; if (st->led_state != temp) { ret = af9005_write_register_bits(d, xd_p_reg_top_locken1, reg_top_locken1_pos, reg_top_locken1_len, temp); if (ret) return ret; ret = af9005_write_register_bits(d, xd_p_reg_top_lock1, reg_top_lock1_pos, reg_top_lock1_len, temp); if (ret) return ret; st->led_state = temp; } return 0; } static int af9005_frontend_attach(struct dvb_usb_adapter *adap) { u8 buf[8]; int i; /* without these calls the first commands after downloading the firmware fail. I put these calls here to simulate what it is done in dvb-usb-init.c. 
*/ struct usb_device *udev = adap->dev->udev; usb_clear_halt(udev, usb_sndbulkpipe(udev, 2)); usb_clear_halt(udev, usb_rcvbulkpipe(udev, 1)); if (dvb_usb_af9005_dump_eeprom) { printk("EEPROM DUMP\n"); for (i = 0; i < 255; i += 8) { af9005_read_eeprom(adap->dev, i, buf, 8); printk("ADDR %x ", i); debug_dump(buf, 8, printk); } } adap->fe_adap[0].fe = af9005_fe_attach(adap->dev); return 0; } static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state) { struct af9005_device_state *st = d->priv; int ret, len; u8 obuf[5]; u8 ibuf[256]; *state = REMOTE_NO_KEY_PRESSED; if (rc_decode == NULL) { /* it shouldn't never come here */ return 0; } /* deb_info("rc_query\n"); */ obuf[0] = 3; /* rest of packet length low */ obuf[1] = 0; /* rest of packet lentgh high */ obuf[2] = 0x40; /* read remote */ obuf[3] = 1; /* rest of packet length */ obuf[4] = st->sequence++; /* sequence number */ ret = dvb_usb_generic_rw(d, obuf, 5, ibuf, 256, 0); if (ret) { err("rc query failed"); return ret; } if (ibuf[2] != 0x41) { err("rc query bad header."); return -EIO; } if (ibuf[4] != obuf[4]) { err("rc query bad sequence."); return -EIO; } len = ibuf[5]; if (len > 246) { err("rc query invalid length"); return -EIO; } if (len > 0) { deb_rc("rc data (%d) ", len); debug_dump((ibuf + 6), len, deb_rc); ret = rc_decode(d, &ibuf[6], len, event, state); if (ret) { err("rc_decode failed"); return ret; } else { deb_rc("rc_decode state %x event %x\n", *state, *event); if (*state == REMOTE_KEY_REPEAT) *event = d->last_event; } } return 0; } static int af9005_power_ctrl(struct dvb_usb_device *d, int onoff) { return 0; } static int af9005_pid_filter_control(struct dvb_usb_adapter *adap, int onoff) { int ret; deb_info("pid filter control onoff %d\n", onoff); if (onoff) { ret = af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 1); if (ret) return ret; ret = af9005_write_register_bits(adap->dev, XD_MP2IF_DMX_CTRL, 1, 1, 1); if (ret) return ret; ret = af9005_write_ofdm_register(adap->dev, 
XD_MP2IF_DMX_CTRL, 1); } else ret = af9005_write_ofdm_register(adap->dev, XD_MP2IF_DMX_CTRL, 0); if (ret) return ret; deb_info("pid filter control ok\n"); return 0; } static int af9005_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { u8 cmd = index & 0x1f; int ret; deb_info("set pid filter, index %d, pid %x, onoff %d\n", index, pid, onoff); if (onoff) { /* cannot use it as pid_filter_ctrl since it has to be done before setting the first pid */ if (adap->feedcount == 1) { deb_info("first pid set, enable pid table\n"); ret = af9005_pid_filter_control(adap, onoff); if (ret) return ret; } ret = af9005_write_ofdm_register(adap->dev, XD_MP2IF_PID_DATA_L, (u8) (pid & 0xff)); if (ret) return ret; ret = af9005_write_ofdm_register(adap->dev, XD_MP2IF_PID_DATA_H, (u8) (pid >> 8)); if (ret) return ret; cmd |= 0x20 | 0x40; } else { if (adap->feedcount == 0) { deb_info("last pid unset, disable pid table\n"); ret = af9005_pid_filter_control(adap, onoff); if (ret) return ret; } } ret = af9005_write_ofdm_register(adap->dev, XD_MP2IF_PID_IDX, cmd); if (ret) return ret; deb_info("set pid ok\n"); return 0; } static int af9005_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { int ret; u8 reply; ret = af9005_boot_packet(udev, FW_CONFIG, &reply); if (ret) return ret; deb_info("result of FW_CONFIG in identify state %d\n", reply); if (reply == 0x01) *cold = 1; else if (reply == 0x02) *cold = 0; else return -EIO; deb_info("Identify state cold = %d\n", *cold); return 0; } static struct dvb_usb_device_properties af9005_properties; static int af9005_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return dvb_usb_device_init(intf, &af9005_properties, THIS_MODULE, NULL, adapter_nr); } enum af9005_usb_table_entry { AFATECH_AF9005, TERRATEC_AF9005, ANSONIC_AF9005, }; static struct usb_device_id af9005_usb_table[] = { [AFATECH_AF9005] = {USB_DEVICE(USB_VID_AFATECH, 
USB_PID_AFATECH_AF9005)}, [TERRATEC_AF9005] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_USB_XE)}, [ANSONIC_AF9005] = {USB_DEVICE(USB_VID_ANSONIC, USB_PID_ANSONIC_DVBT_USB)}, { } }; MODULE_DEVICE_TABLE(usb, af9005_usb_table); static struct dvb_usb_device_properties af9005_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "af9005.fw", .download_firmware = af9005_download_firmware, .no_reconnect = 1, .size_of_priv = sizeof(struct af9005_device_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = af9005_pid_filter, /* .pid_filter_ctrl = af9005_pid_filter_control, */ .frontend_attach = af9005_frontend_attach, /* .tuner_attach = af9005_tuner_attach, */ /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 10, .endpoint = 0x04, .u = { .bulk = { .buffersize = 4096, /* actual size seen is 3948 */ } } }, }}, } }, .power_ctrl = af9005_power_ctrl, .identify_state = af9005_identify_state, .i2c_algo = &af9005_i2c_algo, .rc.legacy = { .rc_interval = 200, .rc_map_table = NULL, .rc_map_size = 0, .rc_query = af9005_rc_query, }, .generic_bulk_ctrl_endpoint = 2, .generic_bulk_ctrl_endpoint_response = 1, .num_device_descs = 3, .devices = { {.name = "Afatech DVB-T USB1.1 stick", .cold_ids = {&af9005_usb_table[AFATECH_AF9005], NULL}, .warm_ids = {NULL}, }, {.name = "TerraTec Cinergy T USB XE", .cold_ids = {&af9005_usb_table[TERRATEC_AF9005], NULL}, .warm_ids = {NULL}, }, {.name = "Ansonic DVB-T USB1.1 stick", .cold_ids = {&af9005_usb_table[ANSONIC_AF9005], NULL}, .warm_ids = {NULL}, }, {NULL}, } }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver af9005_usb_driver = { .name = "dvb_usb_af9005", .probe = af9005_usb_probe, .disconnect = dvb_usb_device_exit, .id_table = af9005_usb_table, }; /* module stuff */ static 
int __init af9005_usb_module_init(void) { int result; if ((result = usb_register(&af9005_usb_driver))) { err("usb_register failed. (%d)", result); return result; } rc_decode = symbol_request(af9005_rc_decode); rc_keys = symbol_request(rc_map_af9005_table); rc_keys_size = symbol_request(rc_map_af9005_table_size); if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) { err("af9005_rc_decode function not found, disabling remote"); af9005_properties.rc.legacy.rc_query = NULL; } else { af9005_properties.rc.legacy.rc_map_table = rc_keys; af9005_properties.rc.legacy.rc_map_size = *rc_keys_size; } return 0; } static void __exit af9005_usb_module_exit(void) { /* release rc decode symbols */ if (rc_decode != NULL) symbol_put(af9005_rc_decode); if (rc_keys != NULL) symbol_put(rc_map_af9005_table); if (rc_keys_size != NULL) symbol_put(rc_map_af9005_table_size); /* deregister this driver from the USB subsystem */ usb_deregister(&af9005_usb_driver); } module_init(af9005_usb_module_init); module_exit(af9005_usb_module_exit); MODULE_AUTHOR("Luca Olivetti <luca@ventoso.org>"); MODULE_DESCRIPTION("Driver for Afatech 9005 DVB-T USB1.1 stick"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
lawnn/Dorimanx-LG-G2-D802-Kernel
drivers/usb/image/microtek.c
7551
23425
/* Driver for Microtek Scanmaker X6 USB scanner, and possibly others. * * (C) Copyright 2000 John Fremlin <vii@penguinpowered.com> * (C) Copyright 2000 Oliver Neukum <Oliver.Neukum@lrz.uni-muenchen.de> * * Parts shamelessly stolen from usb-storage and copyright by their * authors. Thanks to Matt Dharm for giving us permission! * * This driver implements a SCSI host controller driver and a USB * device driver. To avoid confusion, all the USB related stuff is * prefixed by mts_usb_ and all the SCSI stuff by mts_scsi_. * * Microtek (www.microtek.com) did not release the specifications for * their USB protocol to us, so we had to reverse engineer them. We * don't know for which models they are valid. * * The X6 USB has three bulk endpoints, one output (0x1) down which * commands and outgoing data are sent, and two input: 0x82 from which * normal data is read from the scanner (in packets of maximum 32 * bytes) and from which the status byte is read, and 0x83 from which * the results of a scan (or preview) are read in up to 64 * 1024 byte * chunks by the Windows driver. We don't know how much it is possible * to read at a time from 0x83. * * It seems possible to read (with URB transfers) everything from 0x82 * in one go, without bothering to read in 32 byte chunks. * * There seems to be an optimisation of a further READ implicit if * you simply read from 0x83. * * Guessed protocol: * * Send raw SCSI command to EP 0x1 * * If there is data to receive: * If the command was READ datatype=image: * Read a lot of data from EP 0x83 * Else: * Read data from EP 0x82 * Else: * If there is data to transmit: * Write it to EP 0x1 * * Read status byte from EP 0x82 * * References: * * The SCSI command set for the scanner is available from * ftp://ftp.microtek.com/microtek/devpack/ * * Microtek NV sent us a more up to date version of the document. If * you want it, just send mail. * * Status: * * Untested with multiple scanners. * Untested on SMP. * Untested on a bigendian machine. 
* * History: * * 20000417 starting history * 20000417 fixed load oops * 20000417 fixed unload oops * 20000419 fixed READ IMAGE detection * 20000424 started conversion to use URBs * 20000502 handled short transfers as errors * 20000513 rename and organisation of functions (john) * 20000513 added IDs for all products supported by Windows driver (john) * 20000514 Rewrote mts_scsi_queuecommand to use URBs (john) * 20000514 Version 0.0.8j * 20000514 Fix reporting of non-existent devices to SCSI layer (john) * 20000514 Added MTS_DEBUG_INT (john) * 20000514 Changed "usb-microtek" to "microtek" for consistency (john) * 20000514 Stupid bug fixes (john) * 20000514 Version 0.0.9j * 20000515 Put transfer context and URB in mts_desc (john) * 20000515 Added prelim turn off debugging support (john) * 20000515 Version 0.0.10j * 20000515 Fixed up URB allocation (clear URB on alloc) (john) * 20000515 Version 0.0.11j * 20000516 Removed unnecessary spinlock in mts_transfer_context (john) * 20000516 Removed unnecessary up on instance lock in mts_remove_nolock (john) * 20000516 Implemented (badly) scsi_abort (john) * 20000516 Version 0.0.12j * 20000517 Hopefully removed mts_remove_nolock quasideadlock (john) * 20000517 Added mts_debug_dump to print ll USB info (john) * 20000518 Tweaks and documentation updates (john) * 20000518 Version 0.0.13j * 20000518 Cleaned up abort handling (john) * 20000523 Removed scsi_command and various scsi_..._resets (john) * 20000523 Added unlink URB on scsi_abort, now OHCI supports it (john) * 20000523 Fixed last tiresome compile warning (john) * 20000523 Version 0.0.14j (though version 0.1 has come out?) 
* 20000602 Added primitive reset * 20000602 Version 0.2.0 * 20000603 various cosmetic changes * 20000603 Version 0.2.1 * 20000620 minor cosmetic changes * 20000620 Version 0.2.2 * 20000822 Hopefully fixed deadlock in mts_remove_nolock() * 20000822 Fixed minor race in mts_transfer_cleanup() * 20000822 Fixed deadlock on submission error in queuecommand * 20000822 Version 0.2.3 * 20000913 Reduced module size if debugging is off * 20000913 Version 0.2.4 * 20010210 New abort logic * 20010210 Version 0.3.0 * 20010217 Merged scatter/gather * 20010218 Version 0.4.0 * 20010218 Cosmetic fixes * 20010218 Version 0.4.1 * 20010306 Abort while using scatter/gather * 20010306 Version 0.4.2 * 20010311 Remove all timeouts and tidy up generally (john) * 20010320 check return value of scsi_register() * 20010320 Version 0.4.3 * 20010408 Identify version on module load. * 20011003 Fix multiple requests */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/random.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/usb.h> #include <linux/proc_fs.h> #include <linux/atomic.h> #include <linux/blkdev.h> #include "../../scsi/scsi.h" #include <scsi/scsi_host.h> #include "microtek.h" /* * Version Information */ #define DRIVER_VERSION "v0.4.3" #define DRIVER_AUTHOR "John Fremlin <vii@penguinpowered.com>, Oliver Neukum <Oliver.Neukum@lrz.uni-muenchen.de>" #define DRIVER_DESC "Microtek Scanmaker X6 USB scanner driver" /* Should we do debugging? 
*/ //#define MTS_DO_DEBUG /* USB layer driver interface */ static int mts_usb_probe(struct usb_interface *intf, const struct usb_device_id *id); static void mts_usb_disconnect(struct usb_interface *intf); static const struct usb_device_id mts_usb_ids[]; static struct usb_driver mts_usb_driver = { .name = "microtekX6", .probe = mts_usb_probe, .disconnect = mts_usb_disconnect, .id_table = mts_usb_ids, }; /* Internal driver stuff */ #define MTS_VERSION "0.4.3" #define MTS_NAME "microtek usb (rev " MTS_VERSION "): " #define MTS_WARNING(x...) \ printk( KERN_WARNING MTS_NAME x ) #define MTS_ERROR(x...) \ printk( KERN_ERR MTS_NAME x ) #define MTS_INT_ERROR(x...) \ MTS_ERROR(x) #define MTS_MESSAGE(x...) \ printk( KERN_INFO MTS_NAME x ) #if defined MTS_DO_DEBUG #define MTS_DEBUG(x...) \ printk( KERN_DEBUG MTS_NAME x ) #define MTS_DEBUG_GOT_HERE() \ MTS_DEBUG("got to %s:%d (%s)\n", __FILE__, (int)__LINE__, __func__ ) #define MTS_DEBUG_INT() \ do { MTS_DEBUG_GOT_HERE(); \ MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \ MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_length, (int)transfer->actual_length ); \ mts_debug_dump(context->instance);\ } while(0) #else #define MTS_NUL_STATEMENT do { } while(0) #define MTS_DEBUG(x...) 
MTS_NUL_STATEMENT #define MTS_DEBUG_GOT_HERE() MTS_NUL_STATEMENT #define MTS_DEBUG_INT() MTS_NUL_STATEMENT #endif #define MTS_INT_INIT()\ struct mts_transfer_context* context = (struct mts_transfer_context*)transfer->context; \ MTS_DEBUG_INT();\ #ifdef MTS_DO_DEBUG static inline void mts_debug_dump(struct mts_desc* desc) { MTS_DEBUG("desc at 0x%x: toggle = %02x%02x\n", (int)desc, (int)desc->usb_dev->toggle[1],(int)desc->usb_dev->toggle[0] ); MTS_DEBUG("ep_out=%x ep_response=%x ep_image=%x\n", usb_sndbulkpipe(desc->usb_dev,desc->ep_out), usb_rcvbulkpipe(desc->usb_dev,desc->ep_response), usb_rcvbulkpipe(desc->usb_dev,desc->ep_image) ); } static inline void mts_show_command(struct scsi_cmnd *srb) { char *what = NULL; switch (srb->cmnd[0]) { case TEST_UNIT_READY: what = "TEST_UNIT_READY"; break; case REZERO_UNIT: what = "REZERO_UNIT"; break; case REQUEST_SENSE: what = "REQUEST_SENSE"; break; case FORMAT_UNIT: what = "FORMAT_UNIT"; break; case READ_BLOCK_LIMITS: what = "READ_BLOCK_LIMITS"; break; case REASSIGN_BLOCKS: what = "REASSIGN_BLOCKS"; break; case READ_6: what = "READ_6"; break; case WRITE_6: what = "WRITE_6"; break; case SEEK_6: what = "SEEK_6"; break; case READ_REVERSE: what = "READ_REVERSE"; break; case WRITE_FILEMARKS: what = "WRITE_FILEMARKS"; break; case SPACE: what = "SPACE"; break; case INQUIRY: what = "INQUIRY"; break; case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break; case MODE_SELECT: what = "MODE_SELECT"; break; case RESERVE: what = "RESERVE"; break; case RELEASE: what = "RELEASE"; break; case COPY: what = "COPY"; break; case ERASE: what = "ERASE"; break; case MODE_SENSE: what = "MODE_SENSE"; break; case START_STOP: what = "START_STOP"; break; case RECEIVE_DIAGNOSTIC: what = "RECEIVE_DIAGNOSTIC"; break; case SEND_DIAGNOSTIC: what = "SEND_DIAGNOSTIC"; break; case ALLOW_MEDIUM_REMOVAL: what = "ALLOW_MEDIUM_REMOVAL"; break; case SET_WINDOW: what = "SET_WINDOW"; break; case READ_CAPACITY: what = "READ_CAPACITY"; break; case READ_10: what 
= "READ_10"; break; case WRITE_10: what = "WRITE_10"; break; case SEEK_10: what = "SEEK_10"; break; case WRITE_VERIFY: what = "WRITE_VERIFY"; break; case VERIFY: what = "VERIFY"; break; case SEARCH_HIGH: what = "SEARCH_HIGH"; break; case SEARCH_EQUAL: what = "SEARCH_EQUAL"; break; case SEARCH_LOW: what = "SEARCH_LOW"; break; case SET_LIMITS: what = "SET_LIMITS"; break; case READ_POSITION: what = "READ_POSITION"; break; case SYNCHRONIZE_CACHE: what = "SYNCHRONIZE_CACHE"; break; case LOCK_UNLOCK_CACHE: what = "LOCK_UNLOCK_CACHE"; break; case READ_DEFECT_DATA: what = "READ_DEFECT_DATA"; break; case MEDIUM_SCAN: what = "MEDIUM_SCAN"; break; case COMPARE: what = "COMPARE"; break; case COPY_VERIFY: what = "COPY_VERIFY"; break; case WRITE_BUFFER: what = "WRITE_BUFFER"; break; case READ_BUFFER: what = "READ_BUFFER"; break; case UPDATE_BLOCK: what = "UPDATE_BLOCK"; break; case READ_LONG: what = "READ_LONG"; break; case WRITE_LONG: what = "WRITE_LONG"; break; case CHANGE_DEFINITION: what = "CHANGE_DEFINITION"; break; case WRITE_SAME: what = "WRITE_SAME"; break; case READ_TOC: what = "READ_TOC"; break; case LOG_SELECT: what = "LOG_SELECT"; break; case LOG_SENSE: what = "LOG_SENSE"; break; case MODE_SELECT_10: what = "MODE_SELECT_10"; break; case MODE_SENSE_10: what = "MODE_SENSE_10"; break; case MOVE_MEDIUM: what = "MOVE_MEDIUM"; break; case READ_12: what = "READ_12"; break; case WRITE_12: what = "WRITE_12"; break; case WRITE_VERIFY_12: what = "WRITE_VERIFY_12"; break; case SEARCH_HIGH_12: what = "SEARCH_HIGH_12"; break; case SEARCH_EQUAL_12: what = "SEARCH_EQUAL_12"; break; case SEARCH_LOW_12: what = "SEARCH_LOW_12"; break; case READ_ELEMENT_STATUS: what = "READ_ELEMENT_STATUS"; break; case SEND_VOLUME_TAG: what = "SEND_VOLUME_TAG"; break; case WRITE_LONG_2: what = "WRITE_LONG_2"; break; default: MTS_DEBUG("can't decode command\n"); goto out; break; } MTS_DEBUG( "Command %s (%d bytes)\n", what, srb->cmd_len); out: MTS_DEBUG( " %02x %02x %02x %02x %02x %02x %02x %02x %02x 
%02x\n", srb->cmnd[0], srb->cmnd[1], srb->cmnd[2], srb->cmnd[3], srb->cmnd[4], srb->cmnd[5], srb->cmnd[6], srb->cmnd[7], srb->cmnd[8], srb->cmnd[9]); } #else static inline void mts_show_command(struct scsi_cmnd * dummy) { } static inline void mts_debug_dump(struct mts_desc* dummy) { } #endif static inline void mts_urb_abort(struct mts_desc* desc) { MTS_DEBUG_GOT_HERE(); mts_debug_dump(desc); usb_kill_urb( desc->urb ); } static int mts_slave_alloc (struct scsi_device *s) { s->inquiry_len = 0x24; return 0; } static int mts_slave_configure (struct scsi_device *s) { blk_queue_dma_alignment(s->request_queue, (512 - 1)); return 0; } static int mts_scsi_abort(struct scsi_cmnd *srb) { struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); MTS_DEBUG_GOT_HERE(); mts_urb_abort(desc); return FAILED; } static int mts_scsi_host_reset(struct scsi_cmnd *srb) { struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); int result; MTS_DEBUG_GOT_HERE(); mts_debug_dump(desc); result = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf); if (result == 0) { result = usb_reset_device(desc->usb_dev); usb_unlock_device(desc->usb_dev); } return result ? FAILED : SUCCESS; } static int mts_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *srb); static void mts_transfer_cleanup( struct urb *transfer ); static void mts_do_sg(struct urb * transfer); static inline void mts_int_submit_urb (struct urb* transfer, int pipe, void* data, unsigned length, usb_complete_t callback ) /* Interrupt context! */ /* Holding transfer->context->lock! */ { int res; MTS_INT_INIT(); usb_fill_bulk_urb(transfer, context->instance->usb_dev, pipe, data, length, callback, context ); res = usb_submit_urb( transfer, GFP_ATOMIC ); if ( unlikely(res) ) { MTS_INT_ERROR( "could not submit URB! Error was %d\n",(int)res ); context->srb->result = DID_ERROR << 16; mts_transfer_cleanup(transfer); } } static void mts_transfer_cleanup( struct urb *transfer ) /* Interrupt context! 
*/ { MTS_INT_INIT(); if ( likely(context->final_callback != NULL) ) context->final_callback(context->srb); } static void mts_transfer_done( struct urb *transfer ) { MTS_INT_INIT(); context->srb->result &= MTS_SCSI_ERR_MASK; context->srb->result |= (unsigned)(*context->scsi_status)<<1; mts_transfer_cleanup(transfer); } static void mts_get_status( struct urb *transfer ) /* Interrupt context! */ { MTS_INT_INIT(); mts_int_submit_urb(transfer, usb_rcvbulkpipe(context->instance->usb_dev, context->instance->ep_response), context->scsi_status, 1, mts_transfer_done ); } static void mts_data_done( struct urb* transfer ) /* Interrupt context! */ { int status = transfer->status; MTS_INT_INIT(); if ( context->data_length != transfer->actual_length ) { scsi_set_resid(context->srb, context->data_length - transfer->actual_length); } else if ( unlikely(status) ) { context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; } mts_get_status(transfer); } static void mts_command_done( struct urb *transfer ) /* Interrupt context! */ { int status = transfer->status; MTS_INT_INIT(); if ( unlikely(status) ) { if (status == -ENOENT) { /* We are being killed */ MTS_DEBUG_GOT_HERE(); context->srb->result = DID_ABORT<<16; } else { /* A genuine error has occurred */ MTS_DEBUG_GOT_HERE(); context->srb->result = DID_ERROR<<16; } mts_transfer_cleanup(transfer); return; } if (context->srb->cmnd[0] == REQUEST_SENSE) { mts_int_submit_urb(transfer, context->data_pipe, context->srb->sense_buffer, context->data_length, mts_data_done); } else { if ( context->data ) { mts_int_submit_urb(transfer, context->data_pipe, context->data, context->data_length, scsi_sg_count(context->srb) > 1 ? 
mts_do_sg : mts_data_done); } else { mts_get_status(transfer); } } } static void mts_do_sg (struct urb* transfer) { struct scatterlist * sg; int status = transfer->status; MTS_INT_INIT(); MTS_DEBUG("Processing fragment %d of %d\n", context->fragment, scsi_sg_count(context->srb)); if (unlikely(status)) { context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; mts_transfer_cleanup(transfer); } sg = scsi_sglist(context->srb); context->fragment++; mts_int_submit_urb(transfer, context->data_pipe, sg_virt(&sg[context->fragment]), sg[context->fragment].length, context->fragment + 1 == scsi_sg_count(context->srb) ? mts_data_done : mts_do_sg); } static const u8 mts_read_image_sig[] = { 0x28, 00, 00, 00 }; static const u8 mts_read_image_sig_len = 4; static const unsigned char mts_direction[256/8] = { 0x28, 0x81, 0x14, 0x14, 0x20, 0x01, 0x90, 0x77, 0x0C, 0x20, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; #define MTS_DIRECTION_IS_IN(x) ((mts_direction[x>>3] >> (x & 7)) & 1) static void mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc) { int pipe; struct scatterlist * sg; MTS_DEBUG_GOT_HERE(); desc->context.instance = desc; desc->context.srb = srb; desc->context.fragment = 0; if (!scsi_bufflen(srb)) { desc->context.data = NULL; desc->context.data_length = 0; return; } else { sg = scsi_sglist(srb); desc->context.data = sg_virt(&sg[0]); desc->context.data_length = sg[0].length; } /* can't rely on srb->sc_data_direction */ /* Brutally ripped from usb-storage */ if ( !memcmp( srb->cmnd, mts_read_image_sig, mts_read_image_sig_len ) ) { pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_image); MTS_DEBUG( "transferring from desc->ep_image == %d\n", (int)desc->ep_image ); } else if ( MTS_DIRECTION_IS_IN(srb->cmnd[0]) ) { pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_response); MTS_DEBUG( "transferring from desc->ep_response == %d\n", (int)desc->ep_response); } 
else { MTS_DEBUG("transferring to desc->ep_out == %d\n", (int)desc->ep_out); pipe = usb_sndbulkpipe(desc->usb_dev,desc->ep_out); } desc->context.data_pipe = pipe; } static int mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback) { struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); int err = 0; int res; MTS_DEBUG_GOT_HERE(); mts_show_command(srb); mts_debug_dump(desc); if ( srb->device->lun || srb->device->id || srb->device->channel ) { MTS_DEBUG("Command to LUN=%d ID=%d CHANNEL=%d from SCSI layer\n",(int)srb->device->lun,(int)srb->device->id, (int)srb->device->channel ); MTS_DEBUG("this device doesn't exist\n"); srb->result = DID_BAD_TARGET << 16; if(likely(callback != NULL)) callback(srb); goto out; } usb_fill_bulk_urb(desc->urb, desc->usb_dev, usb_sndbulkpipe(desc->usb_dev,desc->ep_out), srb->cmnd, srb->cmd_len, mts_command_done, &desc->context ); mts_build_transfer_context( srb, desc ); desc->context.final_callback = callback; /* here we need ATOMIC as we are called with the iolock */ res=usb_submit_urb(desc->urb, GFP_ATOMIC); if(unlikely(res)){ MTS_ERROR("error %d submitting URB\n",(int)res); srb->result = DID_ERROR << 16; if(likely(callback != NULL)) callback(srb); } out: return err; } static DEF_SCSI_QCMD(mts_scsi_queuecommand) static struct scsi_host_template mts_scsi_host_template = { .module = THIS_MODULE, .name = "microtekX6", .proc_name = "microtekX6", .queuecommand = mts_scsi_queuecommand, .eh_abort_handler = mts_scsi_abort, .eh_host_reset_handler = mts_scsi_host_reset, .sg_tablesize = SG_ALL, .can_queue = 1, .this_id = -1, .cmd_per_lun = 1, .use_clustering = 1, .emulated = 1, .slave_alloc = mts_slave_alloc, .slave_configure = mts_slave_configure, .max_sectors= 256, /* 128 K */ }; /* The entries of microtek_table must correspond, line-by-line to the entries of mts_supported_products[]. 
*/ static const struct usb_device_id mts_usb_ids[] = { { USB_DEVICE(0x4ce, 0x0300) }, { USB_DEVICE(0x5da, 0x0094) }, { USB_DEVICE(0x5da, 0x0099) }, { USB_DEVICE(0x5da, 0x009a) }, { USB_DEVICE(0x5da, 0x00a0) }, { USB_DEVICE(0x5da, 0x00a3) }, { USB_DEVICE(0x5da, 0x80a3) }, { USB_DEVICE(0x5da, 0x80ac) }, { USB_DEVICE(0x5da, 0x00b6) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, mts_usb_ids); static int mts_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { int i; int ep_out = -1; int ep_in_set[3]; /* this will break if we have more than three endpoints which is why we check */ int *ep_in_current = ep_in_set; int err_retval = -ENOMEM; struct mts_desc * new_desc; struct usb_device *dev = interface_to_usbdev (intf); /* the current altsetting on the interface we're probing */ struct usb_host_interface *altsetting; MTS_DEBUG_GOT_HERE(); MTS_DEBUG( "usb-device descriptor at %x\n", (int)dev ); MTS_DEBUG( "product id = 0x%x, vendor id = 0x%x\n", le16_to_cpu(dev->descriptor.idProduct), le16_to_cpu(dev->descriptor.idVendor) ); MTS_DEBUG_GOT_HERE(); /* the current altsetting on the interface we're probing */ altsetting = intf->cur_altsetting; /* Check if the config is sane */ if ( altsetting->desc.bNumEndpoints != MTS_EP_TOTAL ) { MTS_WARNING( "expecting %d got %d endpoints! Bailing out.\n", (int)MTS_EP_TOTAL, (int)altsetting->desc.bNumEndpoints ); return -ENODEV; } for( i = 0; i < altsetting->desc.bNumEndpoints; i++ ) { if ((altsetting->endpoint[i].desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_BULK) { MTS_WARNING( "can only deal with bulk endpoints; endpoint %d is not bulk.\n", (int)altsetting->endpoint[i].desc.bEndpointAddress ); } else { if (altsetting->endpoint[i].desc.bEndpointAddress & USB_DIR_IN) *ep_in_current++ = altsetting->endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; else { if ( ep_out != -1 ) { MTS_WARNING( "can only deal with one output endpoints. Bailing out." 
); return -ENODEV; } ep_out = altsetting->endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; } } } if ( ep_out == -1 ) { MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" ); return -ENODEV; } new_desc = kzalloc(sizeof(struct mts_desc), GFP_KERNEL); if (!new_desc) goto out; new_desc->urb = usb_alloc_urb(0, GFP_KERNEL); if (!new_desc->urb) goto out_kfree; new_desc->context.scsi_status = kmalloc(1, GFP_KERNEL); if (!new_desc->context.scsi_status) goto out_free_urb; new_desc->usb_dev = dev; new_desc->usb_intf = intf; /* endpoints */ new_desc->ep_out = ep_out; new_desc->ep_response = ep_in_set[0]; new_desc->ep_image = ep_in_set[1]; if ( new_desc->ep_out != MTS_EP_OUT ) MTS_WARNING( "will this work? Command EP is not usually %d\n", (int)new_desc->ep_out ); if ( new_desc->ep_response != MTS_EP_RESPONSE ) MTS_WARNING( "will this work? Response EP is not usually %d\n", (int)new_desc->ep_response ); if ( new_desc->ep_image != MTS_EP_IMAGE ) MTS_WARNING( "will this work? 
Image data EP is not usually %d\n", (int)new_desc->ep_image ); new_desc->host = scsi_host_alloc(&mts_scsi_host_template, sizeof(new_desc)); if (!new_desc->host) goto out_kfree2; new_desc->host->hostdata[0] = (unsigned long)new_desc; if (scsi_add_host(new_desc->host, &dev->dev)) { err_retval = -EIO; goto out_host_put; } scsi_scan_host(new_desc->host); usb_set_intfdata(intf, new_desc); return 0; out_host_put: scsi_host_put(new_desc->host); out_kfree2: kfree(new_desc->context.scsi_status); out_free_urb: usb_free_urb(new_desc->urb); out_kfree: kfree(new_desc); out: return err_retval; } static void mts_usb_disconnect (struct usb_interface *intf) { struct mts_desc *desc = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); usb_kill_urb(desc->urb); scsi_remove_host(desc->host); scsi_host_put(desc->host); usb_free_urb(desc->urb); kfree(desc->context.scsi_status); kfree(desc); } module_usb_driver(mts_usb_driver); MODULE_AUTHOR( DRIVER_AUTHOR ); MODULE_DESCRIPTION( DRIVER_DESC ); MODULE_LICENSE("GPL");
gpl-2.0
RickeysWorld/linux
drivers/ptp/ptp_chardev.c
8063
4014
/* * PTP 1588 clock support - character device implementation. * * Copyright (C) 2010 OMICRON electronics GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/posix-clock.h> #include <linux/poll.h> #include <linux/sched.h> #include "ptp_private.h" int ptp_open(struct posix_clock *pc, fmode_t fmode) { return 0; } long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) { struct ptp_clock_caps caps; struct ptp_clock_request req; struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); struct ptp_clock_info *ops = ptp->info; int enable, err = 0; switch (cmd) { case PTP_CLOCK_GETCAPS: memset(&caps, 0, sizeof(caps)); caps.max_adj = ptp->info->max_adj; caps.n_alarm = ptp->info->n_alarm; caps.n_ext_ts = ptp->info->n_ext_ts; caps.n_per_out = ptp->info->n_per_out; caps.pps = ptp->info->pps; if (copy_to_user((void __user *)arg, &caps, sizeof(caps))) err = -EFAULT; break; case PTP_EXTTS_REQUEST: if (copy_from_user(&req.extts, (void __user *)arg, sizeof(req.extts))) { err = -EFAULT; break; } if (req.extts.index >= ops->n_ext_ts) { err = -EINVAL; break; } req.type = PTP_CLK_REQ_EXTTS; enable = req.extts.flags & PTP_ENABLE_FEATURE ? 
1 : 0; err = ops->enable(ops, &req, enable); break; case PTP_PEROUT_REQUEST: if (copy_from_user(&req.perout, (void __user *)arg, sizeof(req.perout))) { err = -EFAULT; break; } if (req.perout.index >= ops->n_per_out) { err = -EINVAL; break; } req.type = PTP_CLK_REQ_PEROUT; enable = req.perout.period.sec || req.perout.period.nsec; err = ops->enable(ops, &req, enable); break; case PTP_ENABLE_PPS: if (!capable(CAP_SYS_TIME)) return -EPERM; req.type = PTP_CLK_REQ_PPS; enable = arg ? 1 : 0; err = ops->enable(ops, &req, enable); break; default: err = -ENOTTY; break; } return err; } unsigned int ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait) { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); poll_wait(fp, &ptp->tsev_wq, wait); return queue_cnt(&ptp->tsevq) ? POLLIN : 0; } ssize_t ptp_read(struct posix_clock *pc, uint rdflags, char __user *buf, size_t cnt) { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); struct timestamp_event_queue *queue = &ptp->tsevq; struct ptp_extts_event event[PTP_BUF_TIMESTAMPS]; unsigned long flags; size_t qcnt, i; if (cnt % sizeof(struct ptp_extts_event) != 0) return -EINVAL; if (cnt > sizeof(event)) cnt = sizeof(event); cnt = cnt / sizeof(struct ptp_extts_event); if (mutex_lock_interruptible(&ptp->tsevq_mux)) return -ERESTARTSYS; if (wait_event_interruptible(ptp->tsev_wq, ptp->defunct || queue_cnt(queue))) { mutex_unlock(&ptp->tsevq_mux); return -ERESTARTSYS; } if (ptp->defunct) { mutex_unlock(&ptp->tsevq_mux); return -ENODEV; } spin_lock_irqsave(&queue->lock, flags); qcnt = queue_cnt(queue); if (cnt > qcnt) cnt = qcnt; for (i = 0; i < cnt; i++) { event[i] = queue->buf[queue->head]; queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; } spin_unlock_irqrestore(&queue->lock, flags); cnt = cnt * sizeof(struct ptp_extts_event); mutex_unlock(&ptp->tsevq_mux); if (copy_to_user(buf, event, cnt)) return -EFAULT; return cnt; }
gpl-2.0
tzanussi/linux-yocto-micro-3.19
drivers/tty/serial/suncore.c
8831
5338
/* suncore.c * * Common SUN serial routines. Based entirely * upon drivers/sbus/char/sunserial.c which is: * * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) * * Adaptation to new UART layer is: * * Copyright (C) 2002 David S. Miller (davem@redhat.com) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/console.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/serial_core.h> #include <linux/sunserialcore.h> #include <linux/init.h> #include <asm/prom.h> static int sunserial_current_minor = 64; int sunserial_register_minors(struct uart_driver *drv, int count) { int err = 0; drv->minor = sunserial_current_minor; drv->nr += count; /* Register the driver on the first call */ if (drv->nr == count) err = uart_register_driver(drv); if (err == 0) { sunserial_current_minor += count; drv->tty_driver->name_base = drv->minor - 64; } return err; } EXPORT_SYMBOL(sunserial_register_minors); void sunserial_unregister_minors(struct uart_driver *drv, int count) { drv->nr -= count; sunserial_current_minor -= count; if (drv->nr == 0) uart_unregister_driver(drv); } EXPORT_SYMBOL(sunserial_unregister_minors); int sunserial_console_match(struct console *con, struct device_node *dp, struct uart_driver *drv, int line, bool ignore_line) { if (!con) return 0; drv->cons = con; if (of_console_device != dp) return 0; if (!ignore_line) { int off = 0; if (of_console_options && *of_console_options == 'b') off = 1; if ((line & 1) != off) return 0; } if (!console_set_on_cmdline) { con->index = line; add_preferred_console(con->name, line, NULL); } return 1; } EXPORT_SYMBOL(sunserial_console_match); void sunserial_console_termios(struct console *con, struct device_node *uart_dp) { const char *mode, *s; char mode_prop[] = "ttyX-mode"; int baud, bits, stop, cflag; char parity; if (!strcmp(uart_dp->name, "rsc") || !strcmp(uart_dp->name, "rsc-console") || !strcmp(uart_dp->name, "rsc-control")) { mode = of_get_property(uart_dp, 
"ssp-console-modes", NULL); if (!mode) mode = "115200,8,n,1,-"; } else if (!strcmp(uart_dp->name, "lom-console")) { mode = "9600,8,n,1,-"; } else { struct device_node *dp; char c; c = 'a'; if (of_console_options) c = *of_console_options; mode_prop[3] = c; dp = of_find_node_by_path("/options"); mode = of_get_property(dp, mode_prop, NULL); if (!mode) mode = "9600,8,n,1,-"; } cflag = CREAD | HUPCL | CLOCAL; s = mode; baud = simple_strtoul(s, NULL, 0); s = strchr(s, ','); bits = simple_strtoul(++s, NULL, 0); s = strchr(s, ','); parity = *(++s); s = strchr(s, ','); stop = simple_strtoul(++s, NULL, 0); s = strchr(s, ','); /* XXX handshake is not handled here. */ switch (baud) { case 150: cflag |= B150; break; case 300: cflag |= B300; break; case 600: cflag |= B600; break; case 1200: cflag |= B1200; break; case 2400: cflag |= B2400; break; case 4800: cflag |= B4800; break; case 9600: cflag |= B9600; break; case 19200: cflag |= B19200; break; case 38400: cflag |= B38400; break; case 57600: cflag |= B57600; break; case 115200: cflag |= B115200; break; case 230400: cflag |= B230400; break; case 460800: cflag |= B460800; break; default: baud = 9600; cflag |= B9600; break; } switch (bits) { case 5: cflag |= CS5; break; case 6: cflag |= CS6; break; case 7: cflag |= CS7; break; case 8: cflag |= CS8; break; default: cflag |= CS8; break; } switch (parity) { case 'o': cflag |= (PARENB | PARODD); break; case 'e': cflag |= PARENB; break; case 'n': default: break; } switch (stop) { case 2: cflag |= CSTOPB; break; case 1: default: break; } con->cflag = cflag; } /* Sun serial MOUSE auto baud rate detection. 
*/ static struct mouse_baud_cflag { int baud; unsigned int cflag; } mouse_baud_table[] = { { 1200, B1200 }, { 2400, B2400 }, { 4800, B4800 }, { 9600, B9600 }, { -1, ~0 }, { -1, ~0 }, }; unsigned int suncore_mouse_baud_cflag_next(unsigned int cflag, int *new_baud) { int i; for (i = 0; mouse_baud_table[i].baud != -1; i++) if (mouse_baud_table[i].cflag == (cflag & CBAUD)) break; i += 1; if (mouse_baud_table[i].baud == -1) i = 0; *new_baud = mouse_baud_table[i].baud; return mouse_baud_table[i].cflag; } EXPORT_SYMBOL(suncore_mouse_baud_cflag_next); /* Basically, when the baud rate is wrong the mouse spits out * breaks to us. */ int suncore_mouse_baud_detection(unsigned char ch, int is_break) { static int mouse_got_break = 0; static int ctr = 0; if (is_break) { /* Let a few normal bytes go by before we jump the gun * and say we need to try another baud rate. */ if (mouse_got_break && ctr < 8) return 1; /* Ok, we need to try another baud. */ ctr = 0; mouse_got_break = 1; return 2; } if (mouse_got_break) { ctr++; if (ch == 0x87) { /* Correct baud rate determined. */ mouse_got_break = 0; } return 1; } return 0; } EXPORT_SYMBOL(suncore_mouse_baud_detection); static int __init suncore_init(void) { return 0; } static void __exit suncore_exit(void) { } module_init(suncore_init); module_exit(suncore_exit); MODULE_AUTHOR("Eddie C. Dost, David S. Miller"); MODULE_DESCRIPTION("Sun serial common layer"); MODULE_LICENSE("GPL");
gpl-2.0
wwbhl/android_kernel_samsung_piranha
sound/ppc/daca.c
9855
7036
/* * PMac DACA lowlevel functions * * Copyright (c) by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/i2c.h> #include <linux/kmod.h> #include <linux/slab.h> #include <sound/core.h> #include "pmac.h" /* i2c address */ #define DACA_I2C_ADDR 0x4d /* registers */ #define DACA_REG_SR 0x01 #define DACA_REG_AVOL 0x02 #define DACA_REG_GCFG 0x03 /* maximum volume value */ #define DACA_VOL_MAX 0x38 struct pmac_daca { struct pmac_keywest i2c; int left_vol, right_vol; unsigned int deemphasis : 1; unsigned int amp_on : 1; }; /* * initialize / detect DACA */ static int daca_init_client(struct pmac_keywest *i2c) { unsigned short wdata = 0x00; /* SR: no swap, 1bit delay, 32-48kHz */ /* GCFG: power amp inverted, DAC on */ if (i2c_smbus_write_byte_data(i2c->client, DACA_REG_SR, 0x08) < 0 || i2c_smbus_write_byte_data(i2c->client, DACA_REG_GCFG, 0x05) < 0) return -EINVAL; return i2c_smbus_write_block_data(i2c->client, DACA_REG_AVOL, 2, (unsigned char*)&wdata); } /* * update volume */ static int daca_set_volume(struct pmac_daca *mix) { unsigned char data[2]; if (! 
mix->i2c.client) return -ENODEV; if (mix->left_vol > DACA_VOL_MAX) data[0] = DACA_VOL_MAX; else data[0] = mix->left_vol; if (mix->right_vol > DACA_VOL_MAX) data[1] = DACA_VOL_MAX; else data[1] = mix->right_vol; data[1] |= mix->deemphasis ? 0x40 : 0; if (i2c_smbus_write_block_data(mix->i2c.client, DACA_REG_AVOL, 2, data) < 0) { snd_printk(KERN_ERR "failed to set volume \n"); return -EINVAL; } return 0; } /* deemphasis switch */ #define daca_info_deemphasis snd_ctl_boolean_mono_info static int daca_get_deemphasis(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_daca *mix; if (! (mix = chip->mixer_data)) return -ENODEV; ucontrol->value.integer.value[0] = mix->deemphasis ? 1 : 0; return 0; } static int daca_put_deemphasis(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_daca *mix; int change; if (! (mix = chip->mixer_data)) return -ENODEV; change = mix->deemphasis != ucontrol->value.integer.value[0]; if (change) { mix->deemphasis = !!ucontrol->value.integer.value[0]; daca_set_volume(mix); } return change; } /* output volume */ static int daca_info_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = DACA_VOL_MAX; return 0; } static int daca_get_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_daca *mix; if (! (mix = chip->mixer_data)) return -ENODEV; ucontrol->value.integer.value[0] = mix->left_vol; ucontrol->value.integer.value[1] = mix->right_vol; return 0; } static int daca_put_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_daca *mix; unsigned int vol[2]; int change; if (! 
(mix = chip->mixer_data)) return -ENODEV; vol[0] = ucontrol->value.integer.value[0]; vol[1] = ucontrol->value.integer.value[1]; if (vol[0] > DACA_VOL_MAX || vol[1] > DACA_VOL_MAX) return -EINVAL; change = mix->left_vol != vol[0] || mix->right_vol != vol[1]; if (change) { mix->left_vol = vol[0]; mix->right_vol = vol[1]; daca_set_volume(mix); } return change; } /* amplifier switch */ #define daca_info_amp daca_info_deemphasis static int daca_get_amp(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_daca *mix; if (! (mix = chip->mixer_data)) return -ENODEV; ucontrol->value.integer.value[0] = mix->amp_on ? 1 : 0; return 0; } static int daca_put_amp(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_pmac *chip = snd_kcontrol_chip(kcontrol); struct pmac_daca *mix; int change; if (! (mix = chip->mixer_data)) return -ENODEV; change = mix->amp_on != ucontrol->value.integer.value[0]; if (change) { mix->amp_on = !!ucontrol->value.integer.value[0]; i2c_smbus_write_byte_data(mix->i2c.client, DACA_REG_GCFG, mix->amp_on ? 0x05 : 0x04); } return change; } static struct snd_kcontrol_new daca_mixers[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Deemphasis Switch", .info = daca_info_deemphasis, .get = daca_get_deemphasis, .put = daca_put_deemphasis }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Master Playback Volume", .info = daca_info_volume, .get = daca_get_volume, .put = daca_put_volume }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Power Amplifier Switch", .info = daca_info_amp, .get = daca_get_amp, .put = daca_put_amp }, }; #ifdef CONFIG_PM static void daca_resume(struct snd_pmac *chip) { struct pmac_daca *mix = chip->mixer_data; i2c_smbus_write_byte_data(mix->i2c.client, DACA_REG_SR, 0x08); i2c_smbus_write_byte_data(mix->i2c.client, DACA_REG_GCFG, mix->amp_on ? 
0x05 : 0x04); daca_set_volume(mix); } #endif /* CONFIG_PM */ static void daca_cleanup(struct snd_pmac *chip) { struct pmac_daca *mix = chip->mixer_data; if (! mix) return; snd_pmac_keywest_cleanup(&mix->i2c); kfree(mix); chip->mixer_data = NULL; } /* exported */ int __devinit snd_pmac_daca_init(struct snd_pmac *chip) { int i, err; struct pmac_daca *mix; request_module("i2c-powermac"); mix = kzalloc(sizeof(*mix), GFP_KERNEL); if (! mix) return -ENOMEM; chip->mixer_data = mix; chip->mixer_free = daca_cleanup; mix->amp_on = 1; /* default on */ mix->i2c.addr = DACA_I2C_ADDR; mix->i2c.init_client = daca_init_client; mix->i2c.name = "DACA"; if ((err = snd_pmac_keywest_init(&mix->i2c)) < 0) return err; /* * build mixers */ strcpy(chip->card->mixername, "PowerMac DACA"); for (i = 0; i < ARRAY_SIZE(daca_mixers); i++) { if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&daca_mixers[i], chip))) < 0) return err; } #ifdef CONFIG_PM chip->resume = daca_resume; #endif return 0; }
gpl-2.0
mifl/android_kernel_pantech_ef63-common
arch/mips/math-emu/dp_scalb.c
10367
1482
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" ieee754dp ieee754dp_scalb(ieee754dp x, int n) { COMPXDP; CLEARCX; EXPLODEXDP; switch (xc) { case IEEE754_CLASS_SNAN: return ieee754dp_nanxcpt(x, "scalb", x, n); case IEEE754_CLASS_QNAN: case IEEE754_CLASS_INF: case IEEE754_CLASS_ZERO: return x; case IEEE754_CLASS_DNORM: DPDNORMX; break; case IEEE754_CLASS_NORM: break; } DPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n); } ieee754dp ieee754dp_ldexp(ieee754dp x, int n) { return ieee754dp_scalb(x, n); }
gpl-2.0
javilonas/Enki-GT-I9300
arch/mips/math-emu/dp_frexp.c
10367
1424
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" /* close to ieeep754dp_logb */ ieee754dp ieee754dp_frexp(ieee754dp x, int *eptr) { COMPXDP; CLEARCX; EXPLODEXDP; switch (xc) { case IEEE754_CLASS_SNAN: case IEEE754_CLASS_QNAN: case IEEE754_CLASS_INF: case IEEE754_CLASS_ZERO: *eptr = 0; return x; case IEEE754_CLASS_DNORM: DPDNORMX; break; case IEEE754_CLASS_NORM: break; } *eptr = xe + 1; return builddp(xs, -1 + DP_EBIAS, xm & ~DP_HIDDEN_BIT); }
gpl-2.0
PyYoshi/is01_froyo_kernel
arch/powerpc/xmon/ppc-dis.c
13695
5571
/* ppc-dis.c -- Disassemble PowerPC instructions Copyright 1994, 1995, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. Written by Ian Lance Taylor, Cygnus Support This file is part of GDB, GAS, and the GNU binutils. GDB, GAS, and the GNU binutils are free software; you can redistribute them and/or modify them under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GDB, GAS, and the GNU binutils are distributed in the hope that they will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this file; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #include <asm/cputable.h> #include "nonstdio.h" #include "ansidecl.h" #include "ppc.h" #include "dis-asm.h" /* Print a PowerPC or POWER instruction. */ int print_insn_powerpc (unsigned long insn, unsigned long memaddr) { const struct powerpc_opcode *opcode; const struct powerpc_opcode *opcode_end; unsigned long op; int dialect; dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC; if (cpu_has_feature(CPU_FTRS_POWER5)) dialect |= PPC_OPCODE_POWER5; if (cpu_has_feature(CPU_FTRS_CELL)) dialect |= PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC; if (cpu_has_feature(CPU_FTRS_POWER6)) dialect |= PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC; /* Get the major opcode of the instruction. */ op = PPC_OP (insn); /* Find the first match in the opcode table. We could speed this up a bit by doing a binary search on the major opcode. 
*/ opcode_end = powerpc_opcodes + powerpc_num_opcodes; again: for (opcode = powerpc_opcodes; opcode < opcode_end; opcode++) { unsigned long table_op; const unsigned char *opindex; const struct powerpc_operand *operand; int invalid; int need_comma; int need_paren; table_op = PPC_OP (opcode->opcode); if (op < table_op) break; if (op > table_op) continue; if ((insn & opcode->mask) != opcode->opcode || (opcode->flags & dialect) == 0) continue; /* Make two passes over the operands. First see if any of them have extraction functions, and, if they do, make sure the instruction is valid. */ invalid = 0; for (opindex = opcode->operands; *opindex != 0; opindex++) { operand = powerpc_operands + *opindex; if (operand->extract) (*operand->extract) (insn, dialect, &invalid); } if (invalid) continue; /* The instruction is valid. */ printf("%s", opcode->name); if (opcode->operands[0] != 0) printf("\t"); /* Now extract and print the operands. */ need_comma = 0; need_paren = 0; for (opindex = opcode->operands; *opindex != 0; opindex++) { long value; operand = powerpc_operands + *opindex; /* Operands that are marked FAKE are simply ignored. We already made sure that the extract function considered the instruction to be valid. */ if ((operand->flags & PPC_OPERAND_FAKE) != 0) continue; /* Extract the value from the instruction. */ if (operand->extract) value = (*operand->extract) (insn, dialect, &invalid); else { value = (insn >> operand->shift) & ((1 << operand->bits) - 1); if ((operand->flags & PPC_OPERAND_SIGNED) != 0 && (value & (1 << (operand->bits - 1))) != 0) value -= 1 << operand->bits; } /* If the operand is optional, and the value is zero, don't print anything. */ if ((operand->flags & PPC_OPERAND_OPTIONAL) != 0 && (operand->flags & PPC_OPERAND_NEXT) == 0 && value == 0) continue; if (need_comma) { printf(","); need_comma = 0; } /* Print the operand as directed by the flags. 
*/ if ((operand->flags & PPC_OPERAND_GPR) != 0 || ((operand->flags & PPC_OPERAND_GPR_0) != 0 && value != 0)) printf("r%ld", value); else if ((operand->flags & PPC_OPERAND_FPR) != 0) printf("f%ld", value); else if ((operand->flags & PPC_OPERAND_VR) != 0) printf("v%ld", value); else if ((operand->flags & PPC_OPERAND_RELATIVE) != 0) print_address (memaddr + value); else if ((operand->flags & PPC_OPERAND_ABSOLUTE) != 0) print_address (value & 0xffffffff); else if ((operand->flags & PPC_OPERAND_CR) == 0 || (dialect & PPC_OPCODE_PPC) == 0) printf("%ld", value); else { if (operand->bits == 3) printf("cr%ld", value); else { static const char *cbnames[4] = { "lt", "gt", "eq", "so" }; int cr; int cc; cr = value >> 2; if (cr != 0) printf("4*cr%d+", cr); cc = value & 3; printf("%s", cbnames[cc]); } } if (need_paren) { printf(")"); need_paren = 0; } if ((operand->flags & PPC_OPERAND_PARENS) == 0) need_comma = 1; else { printf("("); need_paren = 1; } } /* We have found and printed an instruction; return. */ return 4; } if ((dialect & PPC_OPCODE_ANY) != 0) { dialect = ~PPC_OPCODE_ANY; goto again; } /* We could not find a match. */ printf(".long 0x%lx", insn); return 4; }
gpl-2.0
bonezuk/linux
drivers/video/fbdev/i810/i810-i2c.c
13695
5076
/*-*- linux-c -*- * linux/drivers/video/i810-i2c.c -- Intel 810/815 I2C support * * Copyright (C) 2004 Antonino Daplas<adaplas@pol.net> * All Rights Reserved * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/gfp.h> #include <linux/pci.h> #include <linux/fb.h> #include "i810.h" #include "i810_regs.h" #include "i810_main.h" #include "../edid.h" /* bit locations in the registers */ #define SCL_DIR_MASK 0x0001 #define SCL_DIR 0x0002 #define SCL_VAL_MASK 0x0004 #define SCL_VAL_OUT 0x0008 #define SCL_VAL_IN 0x0010 #define SDA_DIR_MASK 0x0100 #define SDA_DIR 0x0200 #define SDA_VAL_MASK 0x0400 #define SDA_VAL_OUT 0x0800 #define SDA_VAL_IN 0x1000 #define DEBUG /* define this for verbose EDID parsing output */ #ifdef DEBUG #define DPRINTK(fmt, args...) printk(fmt,## args) #else #define DPRINTK(fmt, args...) 
#endif

/*
 * Bit-banging callbacks for the i2c-algo-bit framework.
 *
 * Each DDC/I2C channel is driven through one GPIO register
 * (chan->ddc_base).  The *_DIR_MASK / *_VAL_MASK bits select which
 * fields of the register the write updates; *_DIR programs the pad as
 * an output.  A line is released (left for the bus pull-up to raise)
 * by programming it as an input rather than by driving a one, as an
 * open-drain I2C bus requires.
 */

static void i810i2c_setscl(void *data, int state)
{
	struct i810fb_i2c_chan *chan = data;
	struct i810fb_par *par = chan->par;
	u8 __iomem *mmio = par->mmio_start_virtual;

	if (state)
		/* release SCL (input, pull-up raises the line) */
		i810_writel(mmio, chan->ddc_base, SCL_DIR_MASK | SCL_VAL_MASK);
	else
		/* drive SCL low */
		i810_writel(mmio, chan->ddc_base, SCL_DIR | SCL_DIR_MASK |
			    SCL_VAL_MASK);
	i810_readl(mmio, chan->ddc_base);	/* flush posted write */
}

static void i810i2c_setsda(void *data, int state)
{
	struct i810fb_i2c_chan *chan = data;
	struct i810fb_par *par = chan->par;
	u8 __iomem *mmio = par->mmio_start_virtual;

	if (state)
		/* release SDA (input, pull-up raises the line) */
		i810_writel(mmio, chan->ddc_base, SDA_DIR_MASK | SDA_VAL_MASK);
	else
		/* drive SDA low */
		i810_writel(mmio, chan->ddc_base, SDA_DIR | SDA_DIR_MASK |
			    SDA_VAL_MASK);
	i810_readl(mmio, chan->ddc_base);	/* flush posted write */
}

/* Sample SCL: switch the pad to input, then read the pin state. */
static int i810i2c_getscl(void *data)
{
	struct i810fb_i2c_chan *chan = data;
	struct i810fb_par *par = chan->par;
	u8 __iomem *mmio = par->mmio_start_virtual;

	i810_writel(mmio, chan->ddc_base, SCL_DIR_MASK);
	i810_writel(mmio, chan->ddc_base, 0);
	return ((i810_readl(mmio, chan->ddc_base) & SCL_VAL_IN) != 0);
}

/* Sample SDA: switch the pad to input, then read the pin state. */
static int i810i2c_getsda(void *data)
{
	struct i810fb_i2c_chan *chan = data;
	struct i810fb_par *par = chan->par;
	u8 __iomem *mmio = par->mmio_start_virtual;

	i810_writel(mmio, chan->ddc_base, SDA_DIR_MASK);
	i810_writel(mmio, chan->ddc_base, 0);
	return ((i810_readl(mmio, chan->ddc_base) & SDA_VAL_IN) != 0);
}

/*
 * i810_setup_i2c_bus - register one bit-banged I2C adapter
 * @chan: channel state; chan->par and chan->ddc_base must already be set
 * @name: adapter name reported to the I2C core
 *
 * Wires the GPIO callbacks above into chan->algo, raises both lines so
 * the bus starts out idle, and registers the adapter.  On failure
 * chan->par is cleared so i810_delete_i2c_busses() skips this channel.
 * Returns the result of i2c_bit_add_bus() (0 on success).
 */
static int i810_setup_i2c_bus(struct i810fb_i2c_chan *chan, const char *name)
{
	int rc;

	strcpy(chan->adapter.name, name);
	chan->adapter.owner = THIS_MODULE;
	chan->adapter.algo_data = &chan->algo;
	chan->adapter.dev.parent = &chan->par->dev->dev;
	chan->algo.setsda = i810i2c_setsda;
	chan->algo.setscl = i810i2c_setscl;
	chan->algo.getsda = i810i2c_getsda;
	chan->algo.getscl = i810i2c_getscl;
	chan->algo.udelay = 10;		/* bit delay in microseconds */
	chan->algo.timeout = (HZ/2);
	chan->algo.data = chan;

	i2c_set_adapdata(&chan->adapter, chan);

	/* Raise SCL and SDA */
	chan->algo.setsda(chan, 1);
	chan->algo.setscl(chan, 1);
	udelay(20);

	rc = i2c_bit_add_bus(&chan->adapter);
	if (rc == 0)
		dev_dbg(&chan->par->dev->dev, "I2C bus %s registered.\n", name);
	else {
		dev_warn(&chan->par->dev->dev, "Failed to register I2C bus "
			 "%s.\n", name);
		chan->par = NULL;	/* mark channel as not registered */
	}

	return rc;
}

/*
 * i810_create_i2c_busses - set up the three GPIO-based I2C channels
 * @par: driver private data
 *
 * A failed registration is handled inside i810_setup_i2c_bus() (the
 * channel's ->par is cleared there), so nothing is returned here.
 */
void i810_create_i2c_busses(struct i810fb_par *par)
{
	par->chan[0].par = par;
	par->chan[1].par = par;
	par->chan[2].par = par;

	par->chan[0].ddc_base = GPIOA;
	i810_setup_i2c_bus(&par->chan[0], "I810-DDC");
	par->chan[1].ddc_base = GPIOB;
	i810_setup_i2c_bus(&par->chan[1], "I810-I2C");
	par->chan[2].ddc_base = GPIOC;
	i810_setup_i2c_bus(&par->chan[2], "I810-GPIOC");
}

/*
 * i810_delete_i2c_busses - unregister every channel that registered
 * successfully (chan->par != NULL) and mark all channels dead.
 */
void i810_delete_i2c_busses(struct i810fb_par *par)
{
	if (par->chan[0].par)
		i2c_del_adapter(&par->chan[0].adapter);
	par->chan[0].par = NULL;

	if (par->chan[1].par)
		i2c_del_adapter(&par->chan[1].adapter);
	par->chan[1].par = NULL;

	if (par->chan[2].par)
		i2c_del_adapter(&par->chan[2].adapter);
	par->chan[2].par = NULL;
}

/*
 * i810_probe_i2c_connector - fetch an EDID block for a connector
 * @info:     framebuffer info
 * @out_edid: on return, kmalloc'd EDID data (caller frees) or NULL
 * @conn:     zero-based connector index
 *
 * Tries DDC over the connector's I2C bus first; for connectors beyond
 * par->ddc_num it falls back to the EDID left behind by the firmware.
 * Returns 0 if an EDID was obtained, 1 otherwise.
 */
int i810_probe_i2c_connector(struct fb_info *info, u8 **out_edid, int conn)
{
	struct i810fb_par *par = info->par;
	u8 *edid = NULL;

	DPRINTK("i810-i2c: Probe DDC%i Bus\n", conn+1);
	if (conn < par->ddc_num) {
		edid = fb_ddc_read(&par->chan[conn].adapter);
	} else {
		const u8 *e = fb_firmware_edid(info->device);

		if (e != NULL) {
			DPRINTK("i810-i2c: Getting EDID from BIOS\n");
			edid = kmemdup(e, EDID_LENGTH, GFP_KERNEL);
		}
	}

	*out_edid = edid;

	return (edid) ? 0 : 1;
}
gpl-2.0
val2k/linux
net/ipv6/calipso.c
128
39526
/* * CALIPSO - Common Architecture Label IPv6 Security Option * * This is an implementation of the CALIPSO protocol as specified in * RFC 5570. * * Authors: Paul Moore <paul.moore@hp.com> * Huw Davies <huw@codeweavers.com> * */ /* (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * (c) Copyright Huw Davies <huw@codeweavers.com>, 2015 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/calipso.h> #include <linux/atomic.h> #include <linux/bug.h> #include <asm/unaligned.h> #include <linux/crc-ccitt.h> /* Maximium size of the calipso option including * the two-byte TLV header. */ #define CALIPSO_OPT_LEN_MAX (2 + 252) /* Size of the minimum calipso option including * the two-byte TLV header. */ #define CALIPSO_HDR_LEN (2 + 8) /* Maximium size of the calipso option including * the two-byte TLV header and upto 3 bytes of * leading pad and 7 bytes of trailing pad. */ #define CALIPSO_OPT_LEN_MAX_WITH_PAD (3 + CALIPSO_OPT_LEN_MAX + 7) /* Maximium size of u32 aligned buffer required to hold calipso * option. 
Max of 3 initial pad bytes starting from buffer + 3. * i.e. the worst case is when the previous tlv finishes on 4n + 3. */ #define CALIPSO_MAX_BUFFER (6 + CALIPSO_OPT_LEN_MAX) /* List of available DOI definitions */ static DEFINE_SPINLOCK(calipso_doi_list_lock); static LIST_HEAD(calipso_doi_list); /* Label mapping cache */ int calipso_cache_enabled = 1; int calipso_cache_bucketsize = 10; #define CALIPSO_CACHE_BUCKETBITS 7 #define CALIPSO_CACHE_BUCKETS BIT(CALIPSO_CACHE_BUCKETBITS) #define CALIPSO_CACHE_REORDERLIMIT 10 struct calipso_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct calipso_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct calipso_map_cache_bkt *calipso_cache; /* Label Mapping Cache Functions */ /** * calipso_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void calipso_cache_entry_free(struct calipso_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * calipso_map_cache_hash - Hashing function for the CALIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CALIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 calipso_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /** * calipso_cache_init - Initialize the CALIPSO cache * * Description: * Initializes the CALIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
* */ static int __init calipso_cache_init(void) { u32 iter; calipso_cache = kcalloc(CALIPSO_CACHE_BUCKETS, sizeof(struct calipso_map_cache_bkt), GFP_KERNEL); if (!calipso_cache) return -ENOMEM; for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) { spin_lock_init(&calipso_cache[iter].lock); calipso_cache[iter].size = 0; INIT_LIST_HEAD(&calipso_cache[iter].list); } return 0; } /** * calipso_cache_invalidate - Invalidates the current CALIPSO cache * * Description: * Invalidates and frees any entries in the CALIPSO cache. Returns zero on * success and negative values on failure. * */ static void calipso_cache_invalidate(void) { struct calipso_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) { spin_lock_bh(&calipso_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &calipso_cache[iter].list, list) { list_del(&entry->list); calipso_cache_entry_free(entry); } calipso_cache[iter].size = 0; spin_unlock_bh(&calipso_cache[iter].lock); } } /** * calipso_cache_check - Check the CALIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CALIPSO_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. 
* */ static int calipso_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct calipso_map_cache_entry *entry; struct calipso_map_cache_entry *prev_entry = NULL; u32 hash; if (!calipso_cache_enabled) return -ENOENT; hash = calipso_map_cache_hash(key, key_len); bkt = hash & (CALIPSO_CACHE_BUCKETS - 1); spin_lock_bh(&calipso_cache[bkt].lock); list_for_each_entry(entry, &calipso_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; refcount_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CALIPSO; if (!prev_entry) { spin_unlock_bh(&calipso_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CALIPSO_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&calipso_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&calipso_cache[bkt].lock); return -ENOENT; } /** * calipso_cache_add - Add an entry to the CALIPSO cache * @calipso_ptr: the CALIPSO option * @secattr: the packet's security attributes * * Description: * Add a new entry into the CALIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. Returns zero on success, * negative values on failure. The key stored starts at calipso_ptr + 2, * i.e. the type and length bytes are not stored, this corresponds to * calipso_ptr[1] bytes of data. 
* */ static int calipso_cache_add(const unsigned char *calipso_ptr, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct calipso_map_cache_entry *entry = NULL; struct calipso_map_cache_entry *old_entry = NULL; u32 calipso_ptr_len; if (!calipso_cache_enabled || calipso_cache_bucketsize <= 0) return 0; calipso_ptr_len = calipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return -ENOMEM; entry->key = kmemdup(calipso_ptr + 2, calipso_ptr_len, GFP_ATOMIC); if (!entry->key) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = calipso_ptr_len; entry->hash = calipso_map_cache_hash(calipso_ptr, calipso_ptr_len); refcount_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CALIPSO_CACHE_BUCKETS - 1); spin_lock_bh(&calipso_cache[bkt].lock); if (calipso_cache[bkt].size < calipso_cache_bucketsize) { list_add(&entry->list, &calipso_cache[bkt].list); calipso_cache[bkt].size += 1; } else { old_entry = list_entry(calipso_cache[bkt].list.prev, struct calipso_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &calipso_cache[bkt].list); calipso_cache_entry_free(old_entry); } spin_unlock_bh(&calipso_cache[bkt].lock); return 0; cache_add_failure: if (entry) calipso_cache_entry_free(entry); return ret_val; } /* DOI List Functions */ /** * calipso_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. 
*/ static struct calipso_doi *calipso_doi_search(u32 doi) { struct calipso_doi *iter; list_for_each_entry_rcu(iter, &calipso_doi_list, list) if (iter->doi == doi && refcount_read(&iter->refcount)) return iter; return NULL; } /** * calipso_doi_add - Add a new DOI to the CALIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CALIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see calipso.h for details). Returns * zero on success and non-zero on failure. * */ static int calipso_doi_add(struct calipso_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def->doi == CALIPSO_DOI_UNKNOWN) goto doi_add_return; refcount_set(&doi_def->refcount, 1); spin_lock(&calipso_doi_list_lock); if (calipso_doi_search(doi_def->doi)) { spin_unlock(&calipso_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &calipso_doi_list); spin_unlock(&calipso_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CALIPSO_ADD, audit_info); if (audit_buf) { const char *type_str; switch (doi_type) { case CALIPSO_MAP_PASS: type_str = "pass"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " calipso_doi=%u calipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * calipso_doi_free - Frees a DOI definition * @doi_def: the DOI definition * * Description: * This function frees all of the memory associated with a DOI definition. 
* */ static void calipso_doi_free(struct calipso_doi *doi_def) { kfree(doi_def); } /** * calipso_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void calipso_doi_free_rcu(struct rcu_head *entry) { struct calipso_doi *doi_def; doi_def = container_of(entry, struct calipso_doi, rcu); calipso_doi_free(doi_def); } /** * calipso_doi_remove - Remove an existing DOI from the CALIPSO protocol engine * @doi: the DOI value * @audit_secid: the LSM secid to use in the audit message * * Description: * Removes a DOI definition from the CALIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. * */ static int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct calipso_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&calipso_doi_list_lock); doi_def = calipso_doi_search(doi); if (!doi_def) { spin_unlock(&calipso_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!refcount_dec_and_test(&doi_def->refcount)) { spin_unlock(&calipso_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&calipso_doi_list_lock); call_rcu(&doi_def->rcu, calipso_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CALIPSO_DEL, audit_info); if (audit_buf) { audit_log_format(audit_buf, " calipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * calipso_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. 
The caller must ensure that * calipso_doi_putdef() is called when the caller is done. * */ static struct calipso_doi *calipso_doi_getdef(u32 doi) { struct calipso_doi *doi_def; rcu_read_lock(); doi_def = calipso_doi_search(doi); if (!doi_def) goto doi_getdef_return; if (!refcount_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * calipso_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from calipso_doi_getdef(). * */ static void calipso_doi_putdef(struct calipso_doi *doi_def) { if (!doi_def) return; if (!refcount_dec_and_test(&doi_def->refcount)) return; spin_lock(&calipso_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&calipso_doi_list_lock); call_rcu(&doi_def->rcu, calipso_doi_free_rcu); } /** * calipso_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. 
* */
static int calipso_doi_walk(u32 *skip_cnt,
			    int (*callback)(struct calipso_doi *doi_def,
					    void *arg),
			    void *cb_arg)
{
	int ret_val = -ENOENT;
	u32 doi_cnt = 0;
	struct calipso_doi *iter_doi;

	rcu_read_lock();
	list_for_each_entry_rcu(iter_doi, &calipso_doi_list, list)
		/* only live (still referenced) DOIs are counted and visited */
		if (refcount_read(&iter_doi->refcount) > 0) {
			if (doi_cnt++ < *skip_cnt)
				continue;
			ret_val = callback(iter_doi, cb_arg);
			if (ret_val < 0) {
				doi_cnt--;	/* don't count the failed entry */
				goto doi_walk_return;
			}
		}

doi_walk_return:
	rcu_read_unlock();
	*skip_cnt = doi_cnt;	/* tell the caller how far we got */
	return ret_val;
}

/**
 * calipso_validate - Validate a CALIPSO option
 * @skb: the packet
 * @option: the start of the option
 *
 * Description:
 * This routine is called to validate a CALIPSO option.
 * If the option is valid then %true is returned, otherwise
 * %false is returned.
 *
 * The caller should have already checked that the length of the
 * option (including the TLV header) is >= 10 and that the catmap
 * length is consistent with the option length.
 *
 * We leave checks on the level and categories to the socket layer.
 */
bool calipso_validate(const struct sk_buff *skb, const unsigned char *option)
{
	struct calipso_doi *doi_def;
	bool ret_val;
	u16 crc, len = option[1] + 2;	/* TLV length byte + 2-byte header */
	static const u8 zero[2];

	/* The original CRC was computed over the option including the TLV
	 * header but with the CRC-16 field (at offset 8, stored LSB first)
	 * zeroed out, so recompute it the same way: the 8 header bytes, two
	 * zero bytes in place of the CRC field, then any remaining bytes.
	 */
	crc = crc_ccitt(0xffff, option, 8);
	crc = crc_ccitt(crc, zero, sizeof(zero));
	if (len > 10)
		crc = crc_ccitt(crc, option + 10, len - 10);
	crc = ~crc;
	if (option[8] != (crc & 0xff) ||
	    option[9] != ((crc >> 8) & 0xff))
		return false;

	/* the DOI (big-endian, at offset 2) must be a known definition */
	rcu_read_lock();
	doi_def = calipso_doi_search(get_unaligned_be32(option + 2));
	ret_val = !!doi_def;
	rcu_read_unlock();

	return ret_val;
}

/**
 * calipso_map_cat_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition (not consulted by the pass-through mapping)
 * @secattr: the security attributes
 * @net_cat: the zero'd out category bitmap in network/CALIPSO format
 * @net_cat_len: the length of the CALIPSO bitmap in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CALIPSO bitmap using the given DOI definition.  Returns the
 * minimum size in bytes of the network bitmap on success, negative values
 * otherwise.
 */
static int calipso_map_cat_hton(const struct calipso_doi *doi_def,
				const struct netlbl_lsm_secattr *secattr,
				unsigned char *net_cat,
				u32 net_cat_len)
{
	int spot = -1;
	u32 net_spot_max = 0;
	u32 net_clen_bits = net_cat_len * 8;

	/* copy every set bit of the host catmap into the wire bitmap */
	for (;;) {
		spot = netlbl_catmap_walk(secattr->attr.mls.cat,
					  spot + 1);
		if (spot < 0)
			break;
		if (spot >= net_clen_bits)
			return -ENOSPC;	/* category doesn't fit on the wire */
		netlbl_bitmap_setbit(net_cat, spot, 1);

		if (spot > net_spot_max)
			net_spot_max = spot;
	}

	/* round the highest used bit up to whole 32-bit words, in bytes */
	return (net_spot_max / 32 + 1) * 4;
}

/**
 * calipso_map_cat_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the category bitmap in network/CALIPSO format
 * @net_cat_len: the length of the CALIPSO bitmap in bytes
 * @secattr: the security attributes
 *
 * Description:
 * Perform a label mapping to translate a CALIPSO bitmap to the correct local
 * MLS category bitmap using the given DOI definition.  Returns zero on
 * success, negative values on failure.
* */ static int calipso_map_cat_ntoh(const struct calipso_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int spot = -1; u32 net_clen_bits = net_cat_len * 8; for (;;) { spot = netlbl_bitmap_walk(net_cat, net_clen_bits, spot + 1, 1); if (spot < 0) { if (spot == -2) return -EFAULT; return 0; } ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat, spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * calipso_pad_write - Writes pad bytes in TLV format * @buf: the buffer * @offset: offset from start of buffer to write padding * @count: number of pad bytes to write * * Description: * Write @count bytes of TLV padding into @buffer starting at offset @offset. * @count should be less than 8 - see RFC 4942. * */ static int calipso_pad_write(unsigned char *buf, unsigned int offset, unsigned int count) { if (WARN_ON_ONCE(count >= 8)) return -EINVAL; switch (count) { case 0: break; case 1: buf[offset] = IPV6_TLV_PAD1; break; default: buf[offset] = IPV6_TLV_PADN; buf[offset + 1] = count - 2; if (count > 2) memset(buf + offset + 2, 0, count - 2); break; } return 0; } /** * calipso_genopt - Generate a CALIPSO option * @buf: the option buffer * @start: offset from which to write * @buf_len: the size of opt_buf * @doi_def: the CALIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CALIPSO option using the DOI definition and security attributes * passed to the function. This also generates upto three bytes of leading * padding that ensures that the option is 4n + 2 aligned. It returns the * number of bytes written (including any initial padding). 
*/ static int calipso_genopt(unsigned char *buf, u32 start, u32 buf_len, const struct calipso_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 len, pad; u16 crc; static const unsigned char padding[4] = {2, 1, 0, 3}; unsigned char *calipso; /* CALIPSO has 4n + 2 alignment */ pad = padding[start & 3]; if (buf_len <= start + pad + CALIPSO_HDR_LEN) return -ENOSPC; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; len = CALIPSO_HDR_LEN; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = calipso_map_cat_hton(doi_def, secattr, buf + start + pad + len, buf_len - start - pad - len); if (ret_val < 0) return ret_val; len += ret_val; } calipso_pad_write(buf, start, pad); calipso = buf + start + pad; calipso[0] = IPV6_TLV_CALIPSO; calipso[1] = len - 2; *(__be32 *)(calipso + 2) = htonl(doi_def->doi); calipso[6] = (len - CALIPSO_HDR_LEN) / 4; calipso[7] = secattr->attr.mls.lvl, crc = ~crc_ccitt(0xffff, calipso, len); calipso[8] = crc & 0xff; calipso[9] = (crc >> 8) & 0xff; return pad + len; } /* Hop-by-hop hdr helper functions */ /** * calipso_opt_update - Replaces socket's hop options with a new set * @sk: the socket * @hop: new hop options * * Description: * Replaces @sk's hop options with @hop. @hop may be NULL to leave * the socket with no hop options. * */ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop) { struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts; txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS, hop, hop ? ipv6_optlen(hop) : 0); txopt_put(old); if (IS_ERR(txopts)) return PTR_ERR(txopts); txopts = ipv6_update_options(sk, txopts); if (txopts) { atomic_sub(txopts->tot_len, &sk->sk_omem_alloc); txopt_put(txopts); } return 0; } /** * calipso_tlv_len - Returns the length of the TLV * @opt: the option header * @offset: offset of the TLV within the header * * Description: * Returns the length of the TLV option at offset @offset within * the option header @opt. 
Checks that the entire TLV fits inside
 * the option header, returns a negative value if this is not the case.
 */
static int calipso_tlv_len(struct ipv6_opt_hdr *opt, unsigned int offset)
{
	unsigned char *tlv = (unsigned char *)opt;
	unsigned int opt_len = ipv6_optlen(opt), tlv_len;

	if (offset < sizeof(*opt) || offset >= opt_len)
		return -EINVAL;

	/* PAD1 is a single zero byte with no length field */
	if (tlv[offset] == IPV6_TLV_PAD1)
		return 1;

	/* every other TLV carries a length byte; check it is in bounds */
	if (offset + 1 >= opt_len)
		return -EINVAL;

	tlv_len = tlv[offset + 1] + 2;	/* payload + 2-byte type/len header */

	if (offset + tlv_len > opt_len)
		return -EINVAL;

	return tlv_len;
}

/**
 * calipso_opt_find - Finds the CALIPSO option in an IPv6 hop options header
 * @hop: the hop options header
 * @start: on return holds the offset of any leading padding
 * @end: on return holds the offset of the first non-pad TLV after CALIPSO
 *
 * Description:
 * Finds the space occupied by a CALIPSO option (including any leading and
 * trailing padding).
 *
 * If a CALIPSO option exists set @start and @end to the
 * offsets within @hop of the start of padding before the first
 * CALIPSO option and the end of padding after the first CALIPSO
 * option.  In this case the function returns 0.
 *
 * In the absence of a CALIPSO option, @start and @end will be
 * set to the start and end of any trailing padding in the header.
 * This is useful when appending a new option, as the caller may want
 * to overwrite some of this padding.  In this case the function will
 * return -ENOENT.
 */
static int calipso_opt_find(struct ipv6_opt_hdr *hop, unsigned int *start,
			    unsigned int *end)
{
	int ret_val = -ENOENT, tlv_len;
	unsigned int opt_len, offset, offset_s = 0, offset_e = 0;
	unsigned char *opt = (unsigned char *)hop;

	opt_len = ipv6_optlen(hop);
	offset = sizeof(*hop);

	while (offset < opt_len) {
		tlv_len = calipso_tlv_len(hop, offset);
		if (tlv_len < 0)
			return tlv_len;

		switch (opt[offset]) {
		case IPV6_TLV_PAD1:
		case IPV6_TLV_PADN:
			/* padding seen after CALIPSO extends the span's end */
			if (offset_e)
				offset_e = offset;
			break;
		case IPV6_TLV_CALIPSO:
			ret_val = 0;
			offset_e = offset;
			break;
		default:
			/* non-pad TLV: before CALIPSO, remember the latest
			 * one; after CALIPSO, the span is complete, so stop
			 */
			if (offset_e == 0)
				offset_s = offset;
			else
				goto out;
		}

		offset += tlv_len;
	}

out:
	/* @start is the end of the last TLV before the span (or the header
	 * start); @end is the end of the last TLV inside the span (or the
	 * end of the whole header when no CALIPSO/padding was tracked).
	 */
	if (offset_s)
		*start = offset_s + calipso_tlv_len(hop, offset_s);
	else
		*start = sizeof(*hop);
	if (offset_e)
		*end = offset_e + calipso_tlv_len(hop, offset_e);
	else
		*end = opt_len;

	return ret_val;
}

/**
 * calipso_opt_insert - Inserts a CALIPSO option into an IPv6 hop opt hdr
 * @hop: the original hop options header
 * @doi_def: the CALIPSO DOI to use
 * @secattr: the specific security attributes of the socket
 *
 * Description:
 * Creates a new hop options header based on @hop with a
 * CALIPSO option added to it.  If @hop already contains a CALIPSO
 * option this is overwritten, otherwise the new option is appended
 * after any existing options.  If @hop is NULL then the new header
 * will contain just the CALIPSO option and any needed padding.
* */ static struct ipv6_opt_hdr * calipso_opt_insert(struct ipv6_opt_hdr *hop, const struct calipso_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { unsigned int start, end, buf_len, pad, hop_len; struct ipv6_opt_hdr *new; int ret_val; if (hop) { hop_len = ipv6_optlen(hop); ret_val = calipso_opt_find(hop, &start, &end); if (ret_val && ret_val != -ENOENT) return ERR_PTR(ret_val); } else { hop_len = 0; start = sizeof(*hop); end = 0; } buf_len = hop_len + start - end + CALIPSO_OPT_LEN_MAX_WITH_PAD; new = kzalloc(buf_len, GFP_ATOMIC); if (!new) return ERR_PTR(-ENOMEM); if (start > sizeof(*hop)) memcpy(new, hop, start); ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def, secattr); if (ret_val < 0) { kfree(new); return ERR_PTR(ret_val); } buf_len = start + ret_val; /* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */ pad = ((buf_len & 4) + (end & 7)) & 7; calipso_pad_write((unsigned char *)new, buf_len, pad); buf_len += pad; if (end != hop_len) { memcpy((char *)new + buf_len, (char *)hop + end, hop_len - end); buf_len += hop_len - end; } new->nexthdr = 0; new->hdrlen = buf_len / 8 - 1; return new; } /** * calipso_opt_del - Removes the CALIPSO option from an option header * @hop: the original header * @new: the new header * * Description: * Creates a new header based on @hop without any CALIPSO option. If @hop * doesn't contain a CALIPSO option it returns -ENOENT. If @hop contains * no other non-padding options, it returns zero with @new set to NULL. * Otherwise it returns zero, creates a new header without the CALIPSO * option (and removing as much padding as possible) and returns with * @new set to that header. 
* */ static int calipso_opt_del(struct ipv6_opt_hdr *hop, struct ipv6_opt_hdr **new) { int ret_val; unsigned int start, end, delta, pad, hop_len; ret_val = calipso_opt_find(hop, &start, &end); if (ret_val) return ret_val; hop_len = ipv6_optlen(hop); if (start == sizeof(*hop) && end == hop_len) { /* There's no other option in the header so return NULL */ *new = NULL; return 0; } delta = (end - start) & ~7; *new = kzalloc(hop_len - delta, GFP_ATOMIC); if (!*new) return -ENOMEM; memcpy(*new, hop, start); (*new)->hdrlen -= delta / 8; pad = (end - start) & 7; calipso_pad_write((unsigned char *)*new, start, pad); if (end != hop_len) memcpy((char *)*new + start + pad, (char *)hop + end, hop_len - end); return 0; } /** * calipso_opt_getattr - Get the security attributes from a memory block * @calipso: the CALIPSO option * @secattr: the security attributes * * Description: * Inspect @calipso and return the security attributes in @secattr. * Returns zero on success and negative values on failure. * */ static int calipso_opt_getattr(const unsigned char *calipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi, len = calipso[1], cat_len = calipso[6] * 4; struct calipso_doi *doi_def; if (cat_len + 8 > len) return -EINVAL; if (calipso_cache_check(calipso + 2, calipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(calipso + 2); rcu_read_lock(); doi_def = calipso_doi_search(doi); if (!doi_def) goto getattr_return; secattr->attr.mls.lvl = calipso[7]; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (cat_len) { ret_val = calipso_map_cat_ntoh(doi_def, calipso + 10, cat_len, secattr); if (ret_val != 0) { netlbl_catmap_free(secattr->attr.mls.cat); goto getattr_return; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } secattr->type = NETLBL_NLTYPE_CALIPSO; getattr_return: rcu_read_unlock(); return ret_val; } /* sock functions. 
*/ /** * calipso_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CALIPSO option attached to the sock and if * there is return the CALIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ static int calipso_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ipv6_opt_hdr *hop; int opt_len, len, ret_val = -ENOMSG, offset; unsigned char *opt; struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); if (!txopts || !txopts->hopopt) goto done; hop = txopts->hopopt; opt = (unsigned char *)hop; opt_len = ipv6_optlen(hop); offset = sizeof(*hop); while (offset < opt_len) { len = calipso_tlv_len(hop, offset); if (len < 0) { ret_val = len; goto done; } switch (opt[offset]) { case IPV6_TLV_CALIPSO: if (len < CALIPSO_HDR_LEN) ret_val = -EINVAL; else ret_val = calipso_opt_getattr(&opt[offset], secattr); goto done; default: offset += len; break; } } done: txopt_put(txopts); return ret_val; } /** * calipso_sock_setattr - Add a CALIPSO option to a socket * @sk: the socket * @doi_def: the CALIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CALIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
 * */
static int calipso_sock_setattr(struct sock *sk,
				const struct calipso_doi *doi_def,
				const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct ipv6_opt_hdr *old, *new;
	struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));

	old = NULL;
	if (txopts)
		old = txopts->hopopt;

	/* Build a new hop-by-hop header containing the CALIPSO option,
	 * preserving any other options already present in @old.
	 */
	new = calipso_opt_insert(old, doi_def, secattr);
	txopt_put(txopts);
	if (IS_ERR(new))
		return PTR_ERR(new);

	ret_val = calipso_opt_update(sk, new);

	/* calipso_opt_update() copies the header; ours is freed either way */
	kfree(new);
	return ret_val;
}

/**
 * calipso_sock_delattr - Delete the CALIPSO option from a socket
 * @sk: the socket
 *
 * Description:
 * Removes the CALIPSO option from a socket, if present.
 *
 */
static void calipso_sock_delattr(struct sock *sk)
{
	struct ipv6_opt_hdr *new_hop;
	struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));

	if (!txopts || !txopts->hopopt)
		goto done;

	/* new_hop is NULL when CALIPSO was the only hop-by-hop option */
	if (calipso_opt_del(txopts->hopopt, &new_hop))
		goto done;

	calipso_opt_update(sk, new_hop);
	kfree(new_hop);

done:
	txopt_put(txopts);
}

/* request sock functions.
 */

/**
 * calipso_req_setattr - Add a CALIPSO option to a connection request socket
 * @req: the connection request socket
 * @doi_def: the CALIPSO DOI to use
 * @secattr: the specific security attributes of the socket
 *
 * Description:
 * Set the CALIPSO option on the given socket using the DOI definition and
 * security attributes passed to the function.  Returns zero on success and
 * negative values on failure.
 *
 */
static int calipso_req_setattr(struct request_sock *req,
			       const struct calipso_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr)
{
	struct ipv6_txoptions *txopts;
	struct inet_request_sock *req_inet = inet_rsk(req);
	struct ipv6_opt_hdr *old, *new;
	struct sock *sk = sk_to_full_sk(req_to_sk(req));

	if (req_inet->ipv6_opt && req_inet->ipv6_opt->hopopt)
		old = req_inet->ipv6_opt->hopopt;
	else
		old = NULL;

	new = calipso_opt_insert(old, doi_def, secattr);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/* Rebuild the request's tx options around the new hop-by-hop header;
	 * ipv6_renew_options_kern() takes its own copy of @new.
	 */
	txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
					 new, new ? ipv6_optlen(new) : 0);

	kfree(new);

	if (IS_ERR(txopts))
		return PTR_ERR(txopts);

	/* Swap in the new options and release the old set's accounting */
	txopts = xchg(&req_inet->ipv6_opt, txopts);
	if (txopts) {
		atomic_sub(txopts->tot_len, &sk->sk_omem_alloc);
		txopt_put(txopts);
	}

	return 0;
}

/**
 * calipso_req_delattr - Delete the CALIPSO option from a request socket
 * @req: the request socket
 *
 * Description:
 * Removes the CALIPSO option from a request socket, if present.
 *
 */
static void calipso_req_delattr(struct request_sock *req)
{
	struct inet_request_sock *req_inet = inet_rsk(req);
	struct ipv6_opt_hdr *new;
	struct ipv6_txoptions *txopts;
	struct sock *sk = sk_to_full_sk(req_to_sk(req));

	if (!req_inet->ipv6_opt || !req_inet->ipv6_opt->hopopt)
		return;

	if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new))
		return; /* Nothing to do */

	txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
					 new, new ? ipv6_optlen(new) : 0);

	if (!IS_ERR(txopts)) {
		txopts = xchg(&req_inet->ipv6_opt, txopts);
		if (txopts) {
			atomic_sub(txopts->tot_len, &sk->sk_omem_alloc);
			txopt_put(txopts);
		}
	}
	kfree(new);
}

/* skbuff functions.
 */

/**
 * calipso_skbuff_optptr - Find the CALIPSO option in the packet
 * @skb: the packet
 *
 * Description:
 * Parse the packet's IP header looking for a CALIPSO option.  Returns a
 * pointer to the start of the CALIPSO option on success, NULL if one is not
 * found.
 *
 */
static unsigned char *calipso_skbuff_optptr(const struct sk_buff *skb)
{
	const struct ipv6hdr *ip6_hdr = ipv6_hdr(skb);
	int offset;

	/* CALIPSO lives in the hop-by-hop header, which when present is
	 * always the first extension header.
	 */
	if (ip6_hdr->nexthdr != NEXTHDR_HOP)
		return NULL;

	offset = ipv6_find_tlv(skb, sizeof(*ip6_hdr), IPV6_TLV_CALIPSO);
	if (offset >= 0)
		return (unsigned char *)ip6_hdr + offset;

	return NULL;
}

/**
 * calipso_skbuff_setattr - Set the CALIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CALIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CALIPSO option on the given packet based on the security
 * attributes.  Returns zero on success and negative values on failure.
 * */
static int calipso_skbuff_setattr(struct sk_buff *skb,
				  const struct calipso_doi *doi_def,
				  const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct ipv6hdr *ip6_hdr;
	struct ipv6_opt_hdr *hop;
	unsigned char buf[CALIPSO_MAX_BUFFER];
	int len_delta, new_end, pad, payload;
	unsigned int start, end;

	/* Locate any existing CALIPSO option so it can be replaced in
	 * place; [start, end) is its extent within the hop-by-hop header,
	 * or [0, 0) if a fresh header must be created.
	 */
	ip6_hdr = ipv6_hdr(skb);
	if (ip6_hdr->nexthdr == NEXTHDR_HOP) {
		hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
		ret_val = calipso_opt_find(hop, &start, &end);
		if (ret_val && ret_val != -ENOENT)
			return ret_val;
	} else {
		start = 0;
		end = 0;
	}

	/* Generate the new option at the alignment it will occupy */
	memset(buf, 0, sizeof(buf));
	ret_val = calipso_genopt(buf, start & 3, sizeof(buf), doi_def, secattr);
	if (ret_val < 0)
		return ret_val;

	new_end = start + ret_val;
	/* At this point new_end aligns to 4n, so (new_end & 4) pads to 8n */
	pad = ((new_end & 4) + (end & 7)) & 7;
	len_delta = new_end - (int)end + pad;
	ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
	if (ret_val < 0)
		return ret_val;

	ip6_hdr = ipv6_hdr(skb);	/* Reset as skb_cow() may have moved it */

	/* Grow or shrink the packet and slide the IPv6 header plus the
	 * leading, unchanged part of the hop-by-hop header into place,
	 * keeping payload_len consistent.
	 */
	if (len_delta) {
		if (len_delta > 0)
			skb_push(skb, len_delta);
		else
			skb_pull(skb, -len_delta);
		memmove((char *)ip6_hdr - len_delta, ip6_hdr,
			sizeof(*ip6_hdr) + start);
		skb_reset_network_header(skb);
		ip6_hdr = ipv6_hdr(skb);

		payload = ntohs(ip6_hdr->payload_len);
		ip6_hdr->payload_len = htons(payload + len_delta);
	}

	hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
	if (start == 0) {
		/* No hop-by-hop header existed: buf carries a complete one,
		 * so fill in its chain fields and link it after the IPv6
		 * header.
		 */
		struct ipv6_opt_hdr *new_hop = (struct ipv6_opt_hdr *)buf;

		new_hop->nexthdr = ip6_hdr->nexthdr;
		new_hop->hdrlen = len_delta / 8 - 1;
		ip6_hdr->nexthdr = NEXTHDR_HOP;
	} else {
		hop->hdrlen += len_delta / 8;
	}

	memcpy((char *)hop + start, buf + (start & 3), new_end - start);
	calipso_pad_write((unsigned char *)hop, new_end, pad);

	return 0;
}

/**
 * calipso_skbuff_delattr - Delete any CALIPSO options from a packet
 * @skb: the packet
 *
 * Description:
 * Removes any and all CALIPSO options from the given packet.  Returns zero
 * on success, negative values on failure.
 * */
static int calipso_skbuff_delattr(struct sk_buff *skb)
{
	int ret_val;
	struct ipv6hdr *ip6_hdr;
	struct ipv6_opt_hdr *old_hop;
	u32 old_hop_len, start = 0, end = 0, delta, size, pad;

	if (!calipso_skbuff_optptr(skb))
		return 0;

	/* since we are changing the packet we should make a copy */
	ret_val = skb_cow(skb, skb_headroom(skb));
	if (ret_val < 0)
		return ret_val;

	ip6_hdr = ipv6_hdr(skb);
	old_hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
	old_hop_len = ipv6_optlen(old_hop);

	ret_val = calipso_opt_find(old_hop, &start, &end);
	if (ret_val)
		return ret_val;

	if (start == sizeof(*old_hop) && end == old_hop_len) {
		/* There's no other option in the header so we delete
		 * the whole thing. */
		delta = old_hop_len;
		size = sizeof(*ip6_hdr);
		ip6_hdr->nexthdr = old_hop->nexthdr;
	} else {
		/* Only whole 8-octet units can be removed; the remainder
		 * of the option's space becomes padding.
		 */
		delta = (end - start) & ~7;
		if (delta)
			old_hop->hdrlen -= delta / 8;
		pad = (end - start) & 7;
		size = sizeof(*ip6_hdr) + start + pad;
		calipso_pad_write((unsigned char *)old_hop, start, pad);
	}

	/* Slide the IPv6 header (and the retained front of the hop-by-hop
	 * header) forward over the removed bytes.
	 * NOTE(review): ip6_hdr->payload_len is not reduced by delta here,
	 * unlike the adjustment done in calipso_skbuff_setattr() — verify
	 * against mainline before relying on this path.
	 */
	if (delta) {
		skb_pull(skb, delta);
		memmove((char *)ip6_hdr + delta, ip6_hdr, size);
		skb_reset_network_header(skb);
	}

	return 0;
}

/* NetLabel operations vector wiring the CALIPSO engine into the
 * NetLabel subsystem.
 */
static const struct netlbl_calipso_ops ops = {
	.doi_add          = calipso_doi_add,
	.doi_free         = calipso_doi_free,
	.doi_remove       = calipso_doi_remove,
	.doi_getdef       = calipso_doi_getdef,
	.doi_putdef       = calipso_doi_putdef,
	.doi_walk         = calipso_doi_walk,
	.sock_getattr     = calipso_sock_getattr,
	.sock_setattr     = calipso_sock_setattr,
	.sock_delattr     = calipso_sock_delattr,
	.req_setattr      = calipso_req_setattr,
	.req_delattr      = calipso_req_delattr,
	.opt_getattr      = calipso_opt_getattr,
	.skbuff_optptr    = calipso_skbuff_optptr,
	.skbuff_setattr   = calipso_skbuff_setattr,
	.skbuff_delattr   = calipso_skbuff_delattr,
	.cache_invalidate = calipso_cache_invalidate,
	.cache_add        = calipso_cache_add
};

/**
 * calipso_init - Initialize the CALIPSO module
 *
 * Description:
 * Initialize the CALIPSO module and prepare it for use.  Returns zero on
 * success and negative values on failure.
* */ int __init calipso_init(void) { int ret_val; ret_val = calipso_cache_init(); if (!ret_val) netlbl_calipso_ops_register(&ops); return ret_val; } void calipso_exit(void) { netlbl_calipso_ops_register(NULL); calipso_cache_invalidate(); kfree(calipso_cache); }
gpl-2.0
rborisov/u-boot-am335x
common/cmd_yaffs2.c
128
7237
/* Yaffs commands. * Modified by Charles Manning by adding ydevconfig command. * * Use ydevconfig to configure a mountpoint before use. * For example: * # Configure mountpt xxx using nand device 0 using blocks 100-500 * ydevconfig xxx 0 100 500 * # Mount it * ymount xxx * # yls, yrdm etc * yls -l xxx * yrdm xxx/boot-image 82000000 * ... */ #include <common.h> #include <config.h> #include <command.h> #ifdef YAFFS2_DEBUG #define PRINTF(fmt, args...) printf(fmt, ##args) #else #define PRINTF(fmt, args...) do { } while (0) #endif extern void cmd_yaffs_dev_ls(void); extern void cmd_yaffs_tracemask(unsigned set, unsigned mask); extern void cmd_yaffs_devconfig(char *mp, int flash_dev, int start_block, int end_block); extern void cmd_yaffs_mount(char *mp); extern void cmd_yaffs_umount(char *mp); extern void cmd_yaffs_read_file(char *fn); extern void cmd_yaffs_write_file(char *fn, char bval, int sizeOfFile); extern void cmd_yaffs_ls(const char *mountpt, int longlist); extern void cmd_yaffs_mwrite_file(char *fn, char *addr, int size); extern void cmd_yaffs_mread_file(char *fn, char *addr); extern void cmd_yaffs_mkdir(const char *dir); extern void cmd_yaffs_rmdir(const char *dir); extern void cmd_yaffs_rm(const char *path); extern void cmd_yaffs_mv(const char *oldPath, const char *newPath); extern int yaffs_dump_dev(const char *path); /* ytrace - show/set yaffs trace mask */ int do_ytrace(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { if (argc > 1) cmd_yaffs_tracemask(1, simple_strtol(argv[1], NULL, 16)); else cmd_yaffs_tracemask(0, 0); return 0; } /* ydevls - lists yaffs mount points. 
*/ int do_ydevls(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { cmd_yaffs_dev_ls(); return 0; } /* ydevconfig mount_pt mtd_dev_num start_block end_block */ int do_ydevconfig(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *mtpoint; int mtd_dev; int start_block; int end_block; if (argc != 5) { printf ("Bad arguments: ydevconfig mount_pt mtd_dev start_block end_block\n"); return -1; } mtpoint = argv[1]; mtd_dev = simple_strtol(argv[2], NULL, 16); start_block = simple_strtol(argv[3], NULL, 16); end_block = simple_strtol(argv[4], NULL, 16); cmd_yaffs_devconfig(mtpoint, mtd_dev, start_block, end_block); return 0; } int do_ymount(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *mtpoint; if (argc != 2) { printf("Bad arguments: ymount mount_pt\n"); return -1; } mtpoint = argv[1]; printf("Mounting yaffs2 mount point %s\n", mtpoint); cmd_yaffs_mount(mtpoint); return 0; } int do_yumount(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *mtpoint; if (argc != 2) { printf("Bad arguments: yumount mount_pt\n"); return -1; } mtpoint = argv[1]; printf("Unmounting yaffs2 mount point %s\n", mtpoint); cmd_yaffs_umount(mtpoint); return 0; } int do_yls(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *dirname; if (argc < 2 || argc > 3 || (argc == 3 && strcmp(argv[1], "-l"))) { printf("Bad arguments: yls [-l] dir\n"); return -1; } dirname = argv[argc - 1]; cmd_yaffs_ls(dirname, (argc > 2) ? 
1 : 0); return 0; } int do_yrd(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *filename; if (argc != 2) { printf("Bad arguments: yrd file_name\n"); return -1; } filename = argv[1]; printf("Reading file %s ", filename); cmd_yaffs_read_file(filename); printf("done\n"); return 0; } int do_ywr(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *filename; ulong value; ulong numValues; if (argc != 4) { printf("Bad arguments: ywr file_name value n_values\n"); return -1; } filename = argv[1]; value = simple_strtoul(argv[2], NULL, 16); numValues = simple_strtoul(argv[3], NULL, 16); printf("Writing value (%lx) %lx times to %s... ", value, numValues, filename); cmd_yaffs_write_file(filename, value, numValues); printf("done\n"); return 0; } int do_yrdm(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *filename; ulong addr; if (argc != 3) { printf("Bad arguments: yrdm file_name addr\n"); return -1; } filename = argv[1]; addr = simple_strtoul(argv[2], NULL, 16); cmd_yaffs_mread_file(filename, (char *)addr); return 0; } int do_ywrm(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *filename; ulong addr; ulong size; if (argc != 4) { printf("Bad arguments: ywrm file_name addr size\n"); return -1; } filename = argv[1]; addr = simple_strtoul(argv[2], NULL, 16); size = simple_strtoul(argv[3], NULL, 16); cmd_yaffs_mwrite_file(filename, (char *)addr, size); return 0; } int do_ymkdir(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *dirname; if (argc != 2) { printf("Bad arguments: ymkdir dir_name\n"); return -1; } dirname = argv[1]; cmd_yaffs_mkdir(dirname); return 0; } int do_yrmdir(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *dirname; if (argc != 2) { printf("Bad arguments: yrmdir dir_name\n"); return -1; } dirname = argv[1]; cmd_yaffs_rmdir(dirname); return 0; } int do_yrm(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *name; if (argc != 2) { printf("Bad arguments: yrm 
name\n"); return -1; } name = argv[1]; cmd_yaffs_rm(name); return 0; } int do_ymv(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[]) { char *oldPath; char *newPath; if (argc != 3) { printf("Bad arguments: ymv old_path new_path\n"); return -1; } oldPath = argv[1]; newPath = argv[2]; cmd_yaffs_mv(newPath, oldPath); return 0; } U_BOOT_CMD(ytrace, 2, 0, do_ytrace, "show/set yaffs trace mask", "ytrace [new_mask] show/set yaffs trace mask"); U_BOOT_CMD(ydevls, 1, 0, do_ydevls, "list yaffs mount points", "list yaffs mount points"); U_BOOT_CMD(ydevconfig, 5, 0, do_ydevconfig, "configure yaffs mount point", "ydevconfig mtpoint mtd_id start_block end_block configures a yaffs2 mount point"); U_BOOT_CMD(ymount, 2, 0, do_ymount, "mount yaffs", "ymount mtpoint mounts a yaffs2 mount point"); U_BOOT_CMD(yumount, 2, 0, do_yumount, "unmount yaffs", "yunmount mtpoint unmounts a yaffs2 mount point"); U_BOOT_CMD(yls, 3, 0, do_yls, "yaffs ls", "yls [-l] dirname"); U_BOOT_CMD(yrd, 2, 0, do_yrd, "read file from yaffs", "yrd path read file from yaffs"); U_BOOT_CMD(ywr, 4, 0, do_ywr, "write file to yaffs", "ywr filename value num_vlues write values to yaffs file"); U_BOOT_CMD(yrdm, 3, 0, do_yrdm, "read file to memory from yaffs", "yrdm filename offset reads yaffs file into memory"); U_BOOT_CMD(ywrm, 4, 0, do_ywrm, "write file from memory to yaffs", "ywrm filename offset size writes memory to yaffs file"); U_BOOT_CMD(ymkdir, 2, 0, do_ymkdir, "YAFFS mkdir", "ymkdir dir create a yaffs directory"); U_BOOT_CMD(yrmdir, 2, 0, do_yrmdir, "YAFFS rmdir", "yrmdir dirname removes a yaffs directory"); U_BOOT_CMD(yrm, 2, 0, do_yrm, "YAFFS rm", "yrm path removes a yaffs file"); U_BOOT_CMD(ymv, 4, 0, do_ymv, "YAFFS mv", "ymv old_path new_path moves/rename files within a yaffs mount point");
gpl-2.0
CSE3320/kernel-code
.backup_do_not_remove/drivers/acpi/acpica/dsinit.c
128
6183
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: dsinit - Object initialization namespace walk * * Copyright (C) 2000 - 2020, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acdispat.h" #include "acnamesp.h" #include "actables.h" #include "acinterp.h" #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dsinit") /* Local prototypes */ static acpi_status acpi_ds_init_one_object(acpi_handle obj_handle, u32 level, void *context, void **return_value); /******************************************************************************* * * FUNCTION: acpi_ds_init_one_object * * PARAMETERS: obj_handle - Node for the object * level - Current nesting level * context - Points to a init info struct * return_value - Not used * * RETURN: Status * * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every object * within the namespace. 
* * Currently, the only objects that require initialization are: * 1) Methods * 2) Operation Regions * ******************************************************************************/ static acpi_status acpi_ds_init_one_object(acpi_handle obj_handle, u32 level, void *context, void **return_value) { struct acpi_init_walk_info *info = (struct acpi_init_walk_info *)context; struct acpi_namespace_node *node = (struct acpi_namespace_node *)obj_handle; acpi_status status; union acpi_operand_object *obj_desc; ACPI_FUNCTION_ENTRY(); /* * We are only interested in NS nodes owned by the table that * was just loaded */ if (node->owner_id != info->owner_id) { return (AE_OK); } info->object_count++; /* And even then, we are only interested in a few object types */ switch (acpi_ns_get_type(obj_handle)) { case ACPI_TYPE_REGION: status = acpi_ds_initialize_region(obj_handle); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "During Region initialization %p [%4.4s]", obj_handle, acpi_ut_get_node_name(obj_handle))); } info->op_region_count++; break; case ACPI_TYPE_METHOD: /* * Auto-serialization support. We will examine each method that is * not_serialized to determine if it creates any Named objects. If * it does, it will be marked serialized to prevent problems if * the method is entered by two or more threads and an attempt is * made to create the same named object twice -- which results in * an AE_ALREADY_EXISTS exception and method abort. */ info->method_count++; obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { break; } /* Ignore if already serialized */ if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) { info->serial_method_count++; break; } if (acpi_gbl_auto_serialize_methods) { /* Parse/scan method and serialize it if necessary */ acpi_ds_auto_serialize_method(node, obj_desc); if (obj_desc->method. 
info_flags & ACPI_METHOD_SERIALIZED) { /* Method was just converted to Serialized */ info->serial_method_count++; info->serialized_method_count++; break; } } info->non_serial_method_count++; break; case ACPI_TYPE_DEVICE: info->device_count++; break; default: break; } /* * We ignore errors from above, and always return OK, since * we don't want to abort the walk on a single error. */ return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ds_initialize_objects * * PARAMETERS: table_desc - Descriptor for parent ACPI table * start_node - Root of subtree to be initialized. * * RETURN: Status * * DESCRIPTION: Walk the namespace starting at "StartNode" and perform any * necessary initialization on the objects found therein * ******************************************************************************/ acpi_status acpi_ds_initialize_objects(u32 table_index, struct acpi_namespace_node *start_node) { acpi_status status; struct acpi_init_walk_info info; struct acpi_table_header *table; acpi_owner_id owner_id; ACPI_FUNCTION_TRACE(ds_initialize_objects); status = acpi_tb_get_owner_id(table_index, &owner_id); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "**** Starting initialization of namespace objects ****\n")); /* Set all init info to zero */ memset(&info, 0, sizeof(struct acpi_init_walk_info)); info.owner_id = owner_id; info.table_index = table_index; /* Walk entire namespace from the supplied root */ /* * We don't use acpi_walk_namespace since we do not want to acquire * the namespace reader lock. 
*/ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, acpi_ds_init_one_object, NULL, &info, NULL); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); } status = acpi_get_table_by_index(table_index, &table); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* DSDT is always the first AML table */ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT)) { ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "\nACPI table initialization:\n")); } /* Summary of objects initialized */ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Table [%4.4s: %-8.8s] (id %.2X) - %4u Objects with %3u Devices, " "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n", table->signature, table->oem_table_id, owner_id, info.object_count, info.device_count, info.op_region_count, info.method_count, info.serial_method_count, info.non_serial_method_count, info.serialized_method_count)); ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "%u Methods, %u Regions\n", info.method_count, info.op_region_count)); return_ACPI_STATUS(AE_OK); }
gpl-2.0
Eason91/kernel_lge_x3
drivers/md/dm-table.c
384
35036
/* * Copyright (C) 2001 Sistina Software (UK) Limited. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include "dm.h" #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/blkdev.h> #include <linux/namei.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/atomic.h> #define DM_MSG_PREFIX "table" #define MAX_DEPTH 16 #define NODE_SIZE L1_CACHE_BYTES #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t)) #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1) /* * The table has always exactly one reference from either mapped_device->map * or hash_cell->new_map. This reference is not counted in table->holders. * A pair of dm_create_table/dm_destroy_table functions is used for table * creation/destruction. * * Temporary references from the other code increase table->holders. A pair * of dm_table_get/dm_table_put functions is used to manipulate it. * * When the table is about to be destroyed, we wait for table->holders to * drop to zero. */ struct dm_table { struct mapped_device *md; atomic_t holders; unsigned type; /* btree table */ unsigned int depth; unsigned int counts[MAX_DEPTH]; /* in nodes */ sector_t *index[MAX_DEPTH]; unsigned int num_targets; unsigned int num_allocated; sector_t *highs; struct dm_target *targets; unsigned integrity_supported:1; /* * Indicates the rw permissions for the new logical * device. This should be a combination of FMODE_READ * and FMODE_WRITE. 
*/ fmode_t mode; /* a list of devices used by this table */ struct list_head devices; /* events get handed up using this callback */ void (*event_fn)(void *); void *event_context; struct dm_md_mempools *mempools; struct list_head target_callbacks; }; /* * Similar to ceiling(log_size(n)) */ static unsigned int int_log(unsigned int n, unsigned int base) { int result = 0; while (n > 1) { n = dm_div_up(n, base); result++; } return result; } /* * Calculate the index of the child node of the n'th node k'th key. */ static inline unsigned int get_child(unsigned int n, unsigned int k) { return (n * CHILDREN_PER_NODE) + k; } /* * Return the n'th node of level l from table t. */ static inline sector_t *get_node(struct dm_table *t, unsigned int l, unsigned int n) { return t->index[l] + (n * KEYS_PER_NODE); } /* * Return the highest key that you could lookup from the n'th * node on level l of the btree. */ static sector_t high(struct dm_table *t, unsigned int l, unsigned int n) { for (; l < t->depth - 1; l++) n = get_child(n, CHILDREN_PER_NODE - 1); if (n >= t->counts[l]) return (sector_t) - 1; return get_node(t, l, n)[KEYS_PER_NODE - 1]; } /* * Fills in a level of the btree based on the highs of the level * below it. */ static int setup_btree_index(unsigned int l, struct dm_table *t) { unsigned int n, k; sector_t *node; for (n = 0U; n < t->counts[l]; n++) { node = get_node(t, l, n); for (k = 0U; k < KEYS_PER_NODE; k++) node[k] = high(t, l + 1, get_child(n, k)); } return 0; } void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size) { unsigned long size; void *addr; /* * Check that we're not going to overflow. */ if (nmemb > (ULONG_MAX / elem_size)) return NULL; size = nmemb * elem_size; addr = vzalloc(size); return addr; } EXPORT_SYMBOL(dm_vcalloc); /* * highs, and targets are managed as dynamic arrays during a * table load. 
*/ static int alloc_targets(struct dm_table *t, unsigned int num) { sector_t *n_highs; struct dm_target *n_targets; int n = t->num_targets; /* * Allocate both the target array and offset array at once. * Append an empty entry to catch sectors beyond the end of * the device. */ n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) + sizeof(sector_t)); if (!n_highs) return -ENOMEM; n_targets = (struct dm_target *) (n_highs + num); if (n) { memcpy(n_highs, t->highs, sizeof(*n_highs) * n); memcpy(n_targets, t->targets, sizeof(*n_targets) * n); } memset(n_highs + n, -1, sizeof(*n_highs) * (num - n)); vfree(t->highs); t->num_allocated = num; t->highs = n_highs; t->targets = n_targets; return 0; } int dm_table_create(struct dm_table **result, fmode_t mode, unsigned num_targets, struct mapped_device *md) { struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) return -ENOMEM; INIT_LIST_HEAD(&t->devices); INIT_LIST_HEAD(&t->target_callbacks); atomic_set(&t->holders, 0); if (!num_targets) num_targets = KEYS_PER_NODE; num_targets = dm_round_up(num_targets, KEYS_PER_NODE); if (alloc_targets(t, num_targets)) { kfree(t); t = NULL; return -ENOMEM; } t->mode = mode; t->md = md; *result = t; return 0; } static void free_devices(struct list_head *devices) { struct list_head *tmp, *next; list_for_each_safe(tmp, next, devices) { struct dm_dev_internal *dd = list_entry(tmp, struct dm_dev_internal, list); DMWARN("dm_table_destroy: dm_put_device call missing for %s", dd->dm_dev.name); kfree(dd); } } void dm_table_destroy(struct dm_table *t) { unsigned int i; if (!t) return; while (atomic_read(&t->holders)) msleep(1); smp_mb(); /* free the indexes */ if (t->depth >= 2) vfree(t->index[t->depth - 2]); /* free the targets */ for (i = 0; i < t->num_targets; i++) { struct dm_target *tgt = t->targets + i; if (tgt->type->dtr) tgt->type->dtr(tgt); dm_put_target_type(tgt->type); } vfree(t->highs); /* free the device list */ if (t->devices.next != &t->devices) 
free_devices(&t->devices);

	dm_free_md_mempools(t->mempools);

	kfree(t);
}

/* Take a temporary reference; counted in t->holders, not the primary ref. */
void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}
EXPORT_SYMBOL(dm_table_get);

/* Drop a temporary reference taken with dm_table_get(). */
void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	/* order prior accesses before the holder count drops to zero,
	 * which dm_table_destroy() polls for
	 */
	smp_mb__before_atomic_dec();
	atomic_dec(&t->holders);
}
EXPORT_SYMBOL(dm_table_put);

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	/* double the arrays when full */
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev_internal *dd;

	list_for_each_entry (dd, l, list)
		if (dd->dm_dev.bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
		    struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(d->dm_dev.bdev);

	/* exclusive open; _claim_ptr identifies dm as the holder */
	bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	d->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
	if (!d->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
	blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
	d->dm_dev.bdev = NULL;
}

/*
 * If possible, this checks an area of a destination device is invalid.
*/ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q; struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; sector_t dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; unsigned short logical_block_size_sectors = limits->logical_block_size >> SECTOR_SHIFT; char b[BDEVNAME_SIZE]; /* * Some devices exist without request functions, * such as loop devices not yet bound to backing files. * Forbid the use of such devices. */ q = bdev_get_queue(bdev); if (!q || !q->make_request_fn) { DMWARN("%s: %s is not yet initialised: " "start=%llu, len=%llu, dev_size=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), (unsigned long long)start, (unsigned long long)len, (unsigned long long)dev_size); return 1; } if (!dev_size) return 0; if ((start >= dev_size) || (start + len > dev_size)) { DMWARN("%s: %s too small for target: " "start=%llu, len=%llu, dev_size=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), (unsigned long long)start, (unsigned long long)len, (unsigned long long)dev_size); return 1; } if (logical_block_size_sectors <= 1) return 0; if (start & (logical_block_size_sectors - 1)) { DMWARN("%s: start=%llu not aligned to h/w " "logical block size %u of %s", dm_device_name(ti->table->md), (unsigned long long)start, limits->logical_block_size, bdevname(bdev, b)); return 1; } if (len & (logical_block_size_sectors - 1)) { DMWARN("%s: len=%llu not aligned to h/w " "logical block size %u of %s", dm_device_name(ti->table->md), (unsigned long long)len, limits->logical_block_size, bdevname(bdev, b)); return 1; } return 0; } /* * This upgrades the mode on an already open dm_dev, being * careful to leave things as they were if we fail to reopen the * device and not to touch the existing bdev field in case * it is accessed concurrently inside dm_table_any_congested(). 
*/ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, struct mapped_device *md) { int r; struct dm_dev_internal dd_new, dd_old; dd_new = dd_old = *dd; dd_new.dm_dev.mode |= new_mode; dd_new.dm_dev.bdev = NULL; r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md); if (r) return r; dd->dm_dev.mode |= new_mode; close_dev(&dd_old, md); return 0; } /* * Add a device to the list, or just increment the usage count if * it's already present. */ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, struct dm_dev **result) { int r; dev_t uninitialized_var(dev); struct dm_dev_internal *dd; unsigned int major, minor; struct dm_table *t = ti->table; BUG_ON(!t); if (sscanf(path, "%u:%u", &major, &minor) == 2) { /* Extract the major/minor numbers */ dev = MKDEV(major, minor); if (MAJOR(dev) != major || MINOR(dev) != minor) return -EOVERFLOW; } else { /* convert the path to a device */ struct block_device *bdev = lookup_bdev(path); if (IS_ERR(bdev)) return PTR_ERR(bdev); dev = bdev->bd_dev; bdput(bdev); } dd = find_device(&t->devices, dev); if (!dd) { dd = kmalloc(sizeof(*dd), GFP_KERNEL); if (!dd) return -ENOMEM; dd->dm_dev.mode = mode; dd->dm_dev.bdev = NULL; if ((r = open_dev(dd, dev, t->md))) { kfree(dd); return r; } format_dev_t(dd->dm_dev.name, dev); atomic_set(&dd->count, 0); list_add(&dd->list, &t->devices); } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) { r = upgrade_mode(dd, mode, t->md); if (r) return r; } atomic_inc(&dd->count); *result = &dd->dm_dev; return 0; } EXPORT_SYMBOL(dm_get_device); int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct queue_limits *limits = data; struct block_device *bdev = dev->bdev; struct request_queue *q = bdev_get_queue(bdev); char b[BDEVNAME_SIZE]; if (unlikely(!q)) { DMWARN("%s: Cannot set limits for nonexistent device %s", dm_device_name(ti->table->md), bdevname(bdev, b)); return 0; } if (bdev_stack_limits(limits, bdev, start) 
< 0) DMWARN("%s: adding target device %s caused an alignment inconsistency: " "physical_block_size=%u, logical_block_size=%u, " "alignment_offset=%u, start=%llu", dm_device_name(ti->table->md), bdevname(bdev, b), q->limits.physical_block_size, q->limits.logical_block_size, q->limits.alignment_offset, (unsigned long long) start << SECTOR_SHIFT); /* * Check if merge fn is supported. * If not we'll force DM to use PAGE_SIZE or * smaller I/O, just to be safe. */ if (dm_queue_merge_is_compulsory(q) && !ti->type->merge) blk_limits_max_hw_sectors(limits, (unsigned int) (PAGE_SIZE >> 9)); return 0; } EXPORT_SYMBOL_GPL(dm_set_device_limits); /* * Decrement a device's use count and remove it if necessary. */ void dm_put_device(struct dm_target *ti, struct dm_dev *d) { struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal, dm_dev); if (atomic_dec_and_test(&dd->count)) { close_dev(dd, ti->table->md); list_del(&dd->list); kfree(dd); } } EXPORT_SYMBOL(dm_put_device); /* * Checks to see if the target joins onto the end of the table. */ static int adjoin(struct dm_table *table, struct dm_target *ti) { struct dm_target *prev; if (!table->num_targets) return !ti->begin; prev = &table->targets[table->num_targets - 1]; return (ti->begin == (prev->begin + prev->len)); } /* * Used to dynamically allocate the arg array. */ static char **realloc_argv(unsigned *array_size, char **old_argv) { char **argv; unsigned new_size; new_size = *array_size ? *array_size * 2 : 64; argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL); if (argv) { memcpy(argv, old_argv, *array_size * sizeof(*argv)); *array_size = new_size; } kfree(old_argv); return argv; } /* * Destructively splits up the argument list to pass to ctr. 
*/ int dm_split_args(int *argc, char ***argvp, char *input) { char *start, *end = input, *out, **argv = NULL; unsigned array_size = 0; *argc = 0; if (!input) { *argvp = NULL; return 0; } argv = realloc_argv(&array_size, argv); if (!argv) return -ENOMEM; while (1) { /* Skip whitespace */ start = skip_spaces(end); if (!*start) break; /* success, we hit the end */ /* 'out' is used to remove any back-quotes */ end = out = start; while (*end) { /* Everything apart from '\0' can be quoted */ if (*end == '\\' && *(end + 1)) { *out++ = *(end + 1); end += 2; continue; } if (isspace(*end)) break; /* end of token */ *out++ = *end++; } /* have we already filled the array ? */ if ((*argc + 1) > array_size) { argv = realloc_argv(&array_size, argv); if (!argv) return -ENOMEM; } /* we know this is whitespace */ if (*end) end++; /* terminate the string and put it in the array */ *out = '\0'; argv[*argc] = start; (*argc)++; } *argvp = argv; return 0; } /* * Impose necessary and sufficient conditions on a devices's table such * that any incoming bio which respects its logical_block_size can be * processed successfully. If it falls across the boundary between * two or more targets, the size of each piece it gets split into must * be compatible with the logical_block_size of the target processing it. */ static int validate_hardware_logical_block_alignment(struct dm_table *table, struct queue_limits *limits) { /* * This function uses arithmetic modulo the logical_block_size * (in units of 512-byte sectors). */ unsigned short device_logical_block_size_sects = limits->logical_block_size >> SECTOR_SHIFT; /* * Offset of the start of the next table entry, mod logical_block_size. */ unsigned short next_target_start = 0; /* * Given an aligned bio that extends beyond the end of a * target, how many sectors must the next target handle? 
*/ unsigned short remaining = 0; struct dm_target *uninitialized_var(ti); struct queue_limits ti_limits; unsigned i = 0; /* * Check each entry in the table in turn. */ while (i < dm_table_get_num_targets(table)) { ti = dm_table_get_target(table, i++); blk_set_default_limits(&ti_limits); /* combine all target devices' limits */ if (ti->type->iterate_devices) ti->type->iterate_devices(ti, dm_set_device_limits, &ti_limits); /* * If the remaining sectors fall entirely within this * table entry are they compatible with its logical_block_size? */ if (remaining < ti->len && remaining & ((ti_limits.logical_block_size >> SECTOR_SHIFT) - 1)) break; /* Error */ next_target_start = (unsigned short) ((next_target_start + ti->len) & (device_logical_block_size_sects - 1)); remaining = next_target_start ? device_logical_block_size_sects - next_target_start : 0; } if (remaining) { DMWARN("%s: table line %u (start sect %llu len %llu) " "not aligned to h/w logical block size %u", dm_device_name(table->md), i, (unsigned long long) ti->begin, (unsigned long long) ti->len, limits->logical_block_size); return -EINVAL; } return 0; } int dm_table_add_target(struct dm_table *t, const char *type, sector_t start, sector_t len, char *params) { int r = -EINVAL, argc; char **argv; struct dm_target *tgt; if ((r = check_space(t))) return r; tgt = t->targets + t->num_targets; memset(tgt, 0, sizeof(*tgt)); if (!len) { DMERR("%s: zero-length target", dm_device_name(t->md)); return -EINVAL; } tgt->type = dm_get_target_type(type); if (!tgt->type) { DMERR("%s: %s: unknown target type", dm_device_name(t->md), type); return -EINVAL; } tgt->table = t; tgt->begin = start; tgt->len = len; tgt->error = "Unknown error"; /* * Does this target adjoin the previous one ? 
*/ if (!adjoin(t, tgt)) { tgt->error = "Gap in table"; r = -EINVAL; goto bad; } r = dm_split_args(&argc, &argv, params); if (r) { tgt->error = "couldn't split parameters (insufficient memory)"; goto bad; } r = tgt->type->ctr(tgt, argc, argv); kfree(argv); if (r) goto bad; t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; if (!tgt->num_discard_requests && tgt->discards_supported) DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.", dm_device_name(t->md), type); return 0; bad: DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error); dm_put_target_type(tgt->type); return r; } /* * Target argument parsing helpers. */ static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error, unsigned grouped) { const char *arg_str = dm_shift_arg(arg_set); if (!arg_str || (sscanf(arg_str, "%u", value) != 1) || (*value < arg->min) || (*value > arg->max) || (grouped && arg_set->argc < *value)) { *error = arg->error; return -EINVAL; } return 0; } int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 0); } EXPORT_SYMBOL(dm_read_arg); int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, unsigned *value, char **error) { return validate_next_arg(arg, arg_set, value, error, 1); } EXPORT_SYMBOL(dm_read_arg_group); const char *dm_shift_arg(struct dm_arg_set *as) { char *r; if (as->argc) { as->argc--; r = *as->argv; as->argv++; return r; } return NULL; } EXPORT_SYMBOL(dm_shift_arg); void dm_consume_args(struct dm_arg_set *as, unsigned num_args) { BUG_ON(as->argc < num_args); as->argc -= num_args; as->argv += num_args; } EXPORT_SYMBOL(dm_consume_args); static int dm_table_set_type(struct dm_table *t) { unsigned i; unsigned bio_based = 0, request_based = 0; struct dm_target *tgt; struct dm_dev_internal *dd; struct list_head *devices; for (i = 0; i < t->num_targets; i++) { tgt = t->targets + i; if 
(dm_target_request_based(tgt)) request_based = 1; else bio_based = 1; if (bio_based && request_based) { DMWARN("Inconsistent table: different target types" " can't be mixed up"); return -EINVAL; } } if (bio_based) { /* We must use this table as bio-based */ t->type = DM_TYPE_BIO_BASED; return 0; } BUG_ON(!request_based); /* No targets in this table */ /* Non-request-stackable devices can't be used for request-based dm */ devices = dm_table_get_devices(t); list_for_each_entry(dd, devices, list) { if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) { DMWARN("table load rejected: including" " non-request-stackable devices"); return -EINVAL; } } /* * Request-based dm supports only tables that have a single target now. * To support multiple targets, request splitting support is needed, * and that needs lots of changes in the block-layer. * (e.g. request completion process for partial completion.) */ if (t->num_targets > 1) { DMWARN("Request-based dm doesn't support multiple targets yet"); return -EINVAL; } t->type = DM_TYPE_REQUEST_BASED; return 0; } unsigned dm_table_get_type(struct dm_table *t) { return t->type; } bool dm_table_request_based(struct dm_table *t) { return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; } int dm_table_alloc_md_mempools(struct dm_table *t) { unsigned type = dm_table_get_type(t); if (unlikely(type == DM_TYPE_NONE)) { DMWARN("no table type is set, can't allocate mempools"); return -EINVAL; } t->mempools = dm_alloc_md_mempools(type, t->integrity_supported); if (!t->mempools) return -ENOMEM; return 0; } void dm_table_free_md_mempools(struct dm_table *t) { dm_free_md_mempools(t->mempools); t->mempools = NULL; } struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) { return t->mempools; } static int setup_indexes(struct dm_table *t) { int i; unsigned int total = 0; sector_t *indexes; /* allocate the space for *all* the indexes */ for (i = t->depth - 2; i >= 0; i--) { t->counts[i] = dm_div_up(t->counts[i + 1], 
CHILDREN_PER_NODE); total += t->counts[i]; } indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE); if (!indexes) return -ENOMEM; /* set up internal nodes, bottom-up */ for (i = t->depth - 2; i >= 0; i--) { t->index[i] = indexes; indexes += (KEYS_PER_NODE * t->counts[i]); setup_btree_index(i, t); } return 0; } /* * Builds the btree to index the map. */ static int dm_table_build_index(struct dm_table *t) { int r = 0; unsigned int leaf_nodes; /* how many indexes will the btree have ? */ leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); /* leaf layer has already been set up */ t->counts[t->depth - 1] = leaf_nodes; t->index[t->depth - 1] = t->highs; if (t->depth >= 2) r = setup_indexes(t); return r; } /* * Get a disk whose integrity profile reflects the table's profile. * If %match_all is true, all devices' profiles must match. * If %match_all is false, all devices must at least have an * allocated integrity profile; but uninitialized is ok. * Returns NULL if integrity support was inconsistent or unavailable. 
*/ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t, bool match_all) { struct list_head *devices = dm_table_get_devices(t); struct dm_dev_internal *dd = NULL; struct gendisk *prev_disk = NULL, *template_disk = NULL; list_for_each_entry(dd, devices, list) { template_disk = dd->dm_dev.bdev->bd_disk; if (!blk_get_integrity(template_disk)) goto no_integrity; if (!match_all && !blk_integrity_is_initialized(template_disk)) continue; /* skip uninitialized profiles */ else if (prev_disk && blk_integrity_compare(prev_disk, template_disk) < 0) goto no_integrity; prev_disk = template_disk; } return template_disk; no_integrity: if (prev_disk) DMWARN("%s: integrity not set: %s and %s profile mismatch", dm_device_name(t->md), prev_disk->disk_name, template_disk->disk_name); return NULL; } /* * Register the mapped device for blk_integrity support if * the underlying devices have an integrity profile. But all devices * may not have matching profiles (checking all devices isn't reliable * during table load because this table may use other DM device(s) which * must be resumed before they will have an initialized integity profile). * Stacked DM devices force a 2 stage integrity profile validation: * 1 - during load, validate all initialized integrity profiles match * 2 - during resume, validate all integrity profiles match */ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md) { struct gendisk *template_disk = NULL; template_disk = dm_table_get_integrity_disk(t, false); if (!template_disk) return 0; if (!blk_integrity_is_initialized(dm_disk(md))) { t->integrity_supported = 1; return blk_integrity_register(dm_disk(md), NULL); } /* * If DM device already has an initalized integrity * profile the new profile should not conflict. 
*/ if (blk_integrity_is_initialized(template_disk) && blk_integrity_compare(dm_disk(md), template_disk) < 0) { DMWARN("%s: conflict with existing integrity profile: " "%s profile mismatch", dm_device_name(t->md), template_disk->disk_name); return 1; } /* Preserve existing initialized integrity profile */ t->integrity_supported = 1; return 0; } /* * Prepares the table for use by building the indices, * setting the type, and allocating mempools. */ int dm_table_complete(struct dm_table *t) { int r; r = dm_table_set_type(t); if (r) { DMERR("unable to set table type"); return r; } r = dm_table_build_index(t); if (r) { DMERR("unable to build btrees"); return r; } r = dm_table_prealloc_integrity(t, t->md); if (r) { DMERR("could not register integrity profile."); return r; } r = dm_table_alloc_md_mempools(t); if (r) DMERR("unable to allocate mempools"); return r; } static DEFINE_MUTEX(_event_lock); void dm_table_event_callback(struct dm_table *t, void (*fn)(void *), void *context) { mutex_lock(&_event_lock); t->event_fn = fn; t->event_context = context; mutex_unlock(&_event_lock); } void dm_table_event(struct dm_table *t) { /* * You can no longer call dm_table_event() from interrupt * context, use a bottom half instead. */ BUG_ON(in_interrupt()); mutex_lock(&_event_lock); if (t->event_fn) t->event_fn(t->event_context); mutex_unlock(&_event_lock); } EXPORT_SYMBOL(dm_table_event); sector_t dm_table_get_size(struct dm_table *t) { return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; } EXPORT_SYMBOL(dm_table_get_size); struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) { if (index >= t->num_targets) return NULL; return t->targets + index; } /* * Search the btree for the correct target. * * Caller should check returned pointer with dm_target_is_valid() * to trap I/O beyond end of device. 
*/ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) { unsigned int l, n = 0, k = 0; sector_t *node; for (l = 0; l < t->depth; l++) { n = get_child(n, k); node = get_node(t, l, n); for (k = 0; k < KEYS_PER_NODE; k++) if (node[k] >= sector) break; } return &t->targets[(KEYS_PER_NODE * n) + k]; } /* * Establish the new table's queue_limits and validate them. */ int dm_calculate_queue_limits(struct dm_table *table, struct queue_limits *limits) { struct dm_target *uninitialized_var(ti); struct queue_limits ti_limits; unsigned i = 0; blk_set_default_limits(limits); while (i < dm_table_get_num_targets(table)) { blk_set_default_limits(&ti_limits); ti = dm_table_get_target(table, i++); if (!ti->type->iterate_devices) goto combine_limits; /* * Combine queue limits of all the devices this target uses. */ ti->type->iterate_devices(ti, dm_set_device_limits, &ti_limits); /* Set I/O hints portion of queue limits */ if (ti->type->io_hints) ti->type->io_hints(ti, &ti_limits); /* * Check each device area is consistent with the target's * overall queue limits. */ if (ti->type->iterate_devices(ti, device_area_is_invalid, &ti_limits)) return -EINVAL; combine_limits: /* * Merge this target's queue limits into the overall limits * for the table. */ if (blk_stack_limits(limits, &ti_limits, 0) < 0) DMWARN("%s: adding target device " "(start sect %llu len %llu) " "caused an alignment inconsistency", dm_device_name(table->md), (unsigned long long) ti->begin, (unsigned long long) ti->len); } return validate_hardware_logical_block_alignment(table, limits); } /* * Set the integrity profile for this device if all devices used have * matching profiles. We're quite deep in the resume path but still * don't know if all devices (particularly DM devices this device * may be stacked on) have matching profiles. Even if the profiles * don't match we have no way to fail (to resume) at this point. 
*/ static void dm_table_set_integrity(struct dm_table *t) { struct gendisk *template_disk = NULL; if (!blk_get_integrity(dm_disk(t->md))) return; template_disk = dm_table_get_integrity_disk(t, true); if (template_disk) blk_integrity_register(dm_disk(t->md), blk_get_integrity(template_disk)); else if (blk_integrity_is_initialized(dm_disk(t->md))) DMWARN("%s: device no longer has a valid integrity profile", dm_device_name(t->md)); else DMWARN("%s: unable to establish an integrity profile", dm_device_name(t->md)); } static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { unsigned flush = (*(unsigned *)data); struct request_queue *q = bdev_get_queue(dev->bdev); return q && (q->flush_flags & flush); } static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) { struct dm_target *ti; unsigned i = 0; /* * Require at least one underlying device to support flushes. * t->devices includes internal dm devices such as mirror logs * so we need to use iterate_devices here, which targets * supporting flushes must provide. */ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->num_flush_requests) continue; if (ti->type->iterate_devices && ti->type->iterate_devices(ti, device_flush_capable, &flush)) return 1; } return 0; } static bool dm_table_discard_zeroes_data(struct dm_table *t) { struct dm_target *ti; unsigned i = 0; /* Ensure that all targets supports discard_zeroes_data. 
*/ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (ti->discard_zeroes_data_unsupported) return 0; } return 1; } void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { unsigned flush = 0; /* * Copy table's limits to the DM device's request_queue */ q->limits = *limits; if (!dm_table_supports_discards(t)) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); else queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); if (dm_table_supports_flush(t, REQ_FLUSH)) { flush |= REQ_FLUSH; if (dm_table_supports_flush(t, REQ_FUA)) flush |= REQ_FUA; } blk_queue_flush(q, flush); if (!dm_table_discard_zeroes_data(t)) q->limits.discard_zeroes_data = 0; dm_table_set_integrity(t); /* * QUEUE_FLAG_STACKABLE must be set after all queue settings are * visible to other CPUs because, once the flag is set, incoming bios * are processed by request-based dm, which refers to the queue * settings. * Until the flag set, bios are passed to bio-based dm and queued to * md->deferred where queue settings are not needed yet. * Those bios are passed to request-based dm at the resume time. 
*/ smp_mb(); if (dm_table_request_based(t)) queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q); } unsigned int dm_table_get_num_targets(struct dm_table *t) { return t->num_targets; } struct list_head *dm_table_get_devices(struct dm_table *t) { return &t->devices; } fmode_t dm_table_get_mode(struct dm_table *t) { return t->mode; } EXPORT_SYMBOL(dm_table_get_mode); static void suspend_targets(struct dm_table *t, unsigned postsuspend) { int i = t->num_targets; struct dm_target *ti = t->targets; while (i--) { if (postsuspend) { if (ti->type->postsuspend) ti->type->postsuspend(ti); } else if (ti->type->presuspend) ti->type->presuspend(ti); ti++; } } void dm_table_presuspend_targets(struct dm_table *t) { if (!t) return; suspend_targets(t, 0); } void dm_table_postsuspend_targets(struct dm_table *t) { if (!t) return; suspend_targets(t, 1); } int dm_table_resume_targets(struct dm_table *t) { int i, r = 0; for (i = 0; i < t->num_targets; i++) { struct dm_target *ti = t->targets + i; if (!ti->type->preresume) continue; r = ti->type->preresume(ti); if (r) return r; } for (i = 0; i < t->num_targets; i++) { struct dm_target *ti = t->targets + i; if (ti->type->resume) ti->type->resume(ti); } return 0; } void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) { list_add(&cb->list, &t->target_callbacks); } EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks); int dm_table_any_congested(struct dm_table *t, int bdi_bits) { struct dm_dev_internal *dd; struct list_head *devices = dm_table_get_devices(t); struct dm_target_callbacks *cb; int r = 0; list_for_each_entry(dd, devices, list) { struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev); char b[BDEVNAME_SIZE]; if (likely(q)) r |= bdi_congested(&q->backing_dev_info, bdi_bits); else DMWARN_LIMIT("%s: any_congested: nonexistent device %s", dm_device_name(t->md), bdevname(dd->dm_dev.bdev, b)); } list_for_each_entry(cb, &t->target_callbacks, list) if (cb->congested_fn) r |= cb->congested_fn(cb, bdi_bits); 
return r; } int dm_table_any_busy_target(struct dm_table *t) { unsigned i; struct dm_target *ti; for (i = 0; i < t->num_targets; i++) { ti = t->targets + i; if (ti->type->busy && ti->type->busy(ti)) return 1; } return 0; } struct mapped_device *dm_table_get_md(struct dm_table *t) { return t->md; } EXPORT_SYMBOL(dm_table_get_md); static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); return q && blk_queue_discard(q); } bool dm_table_supports_discards(struct dm_table *t) { struct dm_target *ti; unsigned i = 0; /* * Unless any target used by the table set discards_supported, * require at least one underlying device to support discards. * t->devices includes internal dm devices such as mirror logs * so we need to use iterate_devices here, which targets * supporting discard selectively must provide. */ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->num_discard_requests) continue; if (ti->discards_supported) return 1; if (ti->type->iterate_devices && ti->type->iterate_devices(ti, device_discard_capable, NULL)) return 1; } return 0; }
gpl-2.0
zaventh/nexus7-kernel-grouper
drivers/net/wireless/ath/ath5k/pci.c
384
9736
/* * Copyright (c) 2008-2009 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/nl80211.h> #include <linux/pci.h> #include <linux/pci-aspm.h> #include <linux/etherdevice.h> #include "../ath.h" #include "ath5k.h" #include "debug.h" #include "base.h" #include "reg.h" /* Known PCI ids */ static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */ { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */ { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/ { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */ { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */ { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */ { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */ { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */ { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */ { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */ { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */ { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */ { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */ { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */ { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */ { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */ { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */ { PCI_VDEVICE(ATHEROS, 
0x001d) }, /* 2417 Nala */ { 0 } }; MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); /* return bus cachesize in 4B word units */ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz) { struct ath5k_hw *ah = (struct ath5k_hw *) common->priv; u8 u8tmp; pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp); *csz = (int)u8tmp; /* * This check was put in to avoid "unpleasant" consequences if * the bootrom has not fully initialized all PCI devices. * Sometimes the cache line size register is not set */ if (*csz == 0) *csz = L1_CACHE_BYTES >> 2; /* Use the default size */ } /* * Read from eeprom */ static bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) { struct ath5k_hw *ah = (struct ath5k_hw *) common->ah; u32 status, timeout; /* * Initialize EEPROM access */ if (ah->ah_version == AR5K_AR5210) { AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE); (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset)); } else { ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE); AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD, AR5K_EEPROM_CMD_READ); } for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) { status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS); if (status & AR5K_EEPROM_STAT_RDDONE) { if (status & AR5K_EEPROM_STAT_RDERR) return false; *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) & 0xffff); return true; } udelay(15); } return false; } int ath5k_hw_read_srev(struct ath5k_hw *ah) { ah->ah_mac_srev = ath5k_hw_reg_read(ah, AR5K_SREV); return 0; } /* * Read the MAC address from eeprom or platform_data */ static int ath5k_pci_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac) { u8 mac_d[ETH_ALEN] = {}; u32 total, offset; u16 data; int octet; AR5K_EEPROM_READ(0x20, data); for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) { AR5K_EEPROM_READ(offset, data); total += data; mac_d[octet + 1] = data & 0xff; mac_d[octet] = data >> 8; octet += 2; } if (!total || total == 3 * 0xffff) return -EINVAL; 
memcpy(mac, mac_d, ETH_ALEN); return 0; } /* Common ath_bus_opts structure */ static const struct ath_bus_ops ath_pci_bus_ops = { .ath_bus_type = ATH_PCI, .read_cachesize = ath5k_pci_read_cachesize, .eeprom_read = ath5k_pci_eeprom_read, .eeprom_read_mac = ath5k_pci_eeprom_read_mac, }; /********************\ * PCI Initialization * \********************/ static int __devinit ath5k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { void __iomem *mem; struct ath5k_hw *ah; struct ieee80211_hw *hw; int ret; u8 csz; /* * L0s needs to be disabled on all ath5k cards. * * For distributions shipping with CONFIG_PCIEASPM (this will be enabled * by default in the future in 2.6.36) this will also mean both L1 and * L0s will be disabled when a pre 1.1 PCIe device is detected. We do * know L1 works correctly even for all ath5k pre 1.1 PCIe devices * though but cannot currently undue the effect of a blacklist, for * details you can read pcie_aspm_sanity_check() and see how it adjusts * the device link capability. * * It may be possible in the future to implement some PCI API to allow * drivers to override blacklists for pre 1.1 PCIe but for now it is * best to accept that both L0s and L1 will be disabled completely for * distributions shipping with CONFIG_PCIEASPM rather than having this * issue present. Motivation for adding this new API will be to help * with power consumption for some of these devices. */ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "can't enable device\n"); goto err; } /* XXX 32-bit addressing only */ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "32-bit DMA not available\n"); goto err_dis; } /* * Cache line size is used to size and align various * structures used to communicate with the hardware. 
*/ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); if (csz == 0) { /* * Linux 2.4.18 (at least) writes the cache line size * register as a 16-bit wide register which is wrong. * We must have this setup properly for rx buffer * DMA to work so force a reasonable value here if it * comes up zero. */ csz = L1_CACHE_BYTES >> 2; pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); } /* * The default setting of latency timer yields poor results, * set it to the value used by other systems. It may be worth * tweaking this setting more. */ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); /* Enable bus mastering */ pci_set_master(pdev); /* * Disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state. */ pci_write_config_byte(pdev, 0x41, 0); ret = pci_request_region(pdev, 0, "ath5k"); if (ret) { dev_err(&pdev->dev, "cannot reserve PCI memory region\n"); goto err_dis; } mem = pci_iomap(pdev, 0, 0); if (!mem) { dev_err(&pdev->dev, "cannot remap PCI memory region\n"); ret = -EIO; goto err_reg; } /* * Allocate hw (mac80211 main struct) * and hw->priv (driver private data) */ hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops); if (hw == NULL) { dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n"); ret = -ENOMEM; goto err_map; } dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy)); ah = hw->priv; ah->hw = hw; ah->pdev = pdev; ah->dev = &pdev->dev; ah->irq = pdev->irq; ah->devid = id->device; ah->iobase = mem; /* So we can unmap it on detach */ /* Initialize */ ret = ath5k_init_softc(ah, &ath_pci_bus_ops); if (ret) goto err_free; /* Set private data */ pci_set_drvdata(pdev, hw); return 0; err_free: ieee80211_free_hw(hw); err_map: pci_iounmap(pdev, mem); err_reg: pci_release_region(pdev, 0); err_dis: pci_disable_device(pdev); err: return ret; } static void __devexit ath5k_pci_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath5k_hw *ah = hw->priv; ath5k_deinit_softc(ah); 
pci_iounmap(pdev, ah->iobase); pci_release_region(pdev, 0); pci_disable_device(pdev); ieee80211_free_hw(hw); } #ifdef CONFIG_PM_SLEEP static int ath5k_pci_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath5k_hw *ah = hw->priv; ath5k_led_off(ah); return 0; } static int ath5k_pci_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath5k_hw *ah = hw->priv; /* * Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, 0x41, 0); ath5k_led_enable(ah); return 0; } static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume); #define ATH5K_PM_OPS (&ath5k_pm_ops) #else #define ATH5K_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static struct pci_driver ath5k_pci_driver = { .name = KBUILD_MODNAME, .id_table = ath5k_pci_id_table, .probe = ath5k_pci_probe, .remove = __devexit_p(ath5k_pci_remove), .driver.pm = ATH5K_PM_OPS, }; /* * Module init/exit functions */ static int __init init_ath5k_pci(void) { int ret; ret = pci_register_driver(&ath5k_pci_driver); if (ret) { printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); return ret; } return 0; } static void __exit exit_ath5k_pci(void) { pci_unregister_driver(&ath5k_pci_driver); } module_init(init_ath5k_pci); module_exit(exit_ath5k_pci);
gpl-2.0
steev/linux-kernel
drivers/scsi/u14-34f.c
896
67705
/* * u14-34f.c - Low-level driver for UltraStor 14F/34F SCSI host adapters. * * 03 Jun 2003 Rev. 8.10 for linux-2.5.70 * + Update for new IRQ API. * + Use "goto" when appropriate. * + Drop u14-34f.h. * + Update for new module_param API. * + Module parameters can now be specified only in the * same format as the kernel boot options. * * boot option old module param * ----------- ------------------ * addr,... io_port=addr,... * lc:[y|n] linked_comm=[1|0] * mq:xx max_queue_depth=xx * tm:[0|1|2] tag_mode=[0|1|2] * et:[y|n] ext_tran=[1|0] * of:[y|n] have_old_firmware=[1|0] * * A valid example using the new parameter format is: * modprobe u14-34f "u14-34f=0x340,0x330,lc:y,tm:0,mq:4" * * which is equivalent to the old format: * modprobe u14-34f io_port=0x340,0x330 linked_comm=1 tag_mode=0 \ * max_queue_depth=4 * * With actual module code, u14-34f and u14_34f are equivalent * as module parameter names. * * 12 Feb 2003 Rev. 8.04 for linux 2.5.60 * + Release irq before calling scsi_register. * * 12 Nov 2002 Rev. 8.02 for linux 2.5.47 * + Release driver_lock before calling scsi_register. * * 11 Nov 2002 Rev. 8.01 for linux 2.5.47 * + Fixed bios_param and scsicam_bios_param calling parameters. * * 28 Oct 2002 Rev. 8.00 for linux 2.5.44-ac4 * + Use new tcq and adjust_queue_depth api. * + New command line option (tm:[0-2]) to choose the type of tags: * 0 -> disable tagging ; 1 -> simple tags ; 2 -> ordered tags. * Default is tm:0 (tagged commands disabled). * For compatibility the "tc:" option is an alias of the "tm:" * option; tc:n is equivalent to tm:0 and tc:y is equivalent to * tm:1. * * 10 Oct 2002 Rev. 7.70 for linux 2.5.42 * + Foreport from revision 6.70. * * 25 Jun 2002 Rev. 6.70 for linux 2.4.19 * + Fixed endian-ness problem due to bitfields. * * 21 Feb 2002 Rev. 6.52 for linux 2.4.18 * + Backport from rev. 7.22 (use io_request_lock). * * 20 Feb 2002 Rev. 7.22 for linux 2.5.5 * + Remove any reference to virt_to_bus(). * + Fix pio hang while detecting multiple HBAs. 
* * 01 Jan 2002 Rev. 7.20 for linux 2.5.1 * + Use the dynamic DMA mapping API. * * 19 Dec 2001 Rev. 7.02 for linux 2.5.1 * + Use SCpnt->sc_data_direction if set. * + Use sglist.page instead of sglist.address. * * 11 Dec 2001 Rev. 7.00 for linux 2.5.1 * + Use host->host_lock instead of io_request_lock. * * 1 May 2001 Rev. 6.05 for linux 2.4.4 * + Fix data transfer direction for opcode SEND_CUE_SHEET (0x5d) * * 25 Jan 2001 Rev. 6.03 for linux 2.4.0 * + "check_region" call replaced by "request_region". * * 22 Nov 2000 Rev. 6.02 for linux 2.4.0-test11 * + Removed old scsi error handling support. * + The obsolete boot option flag eh:n is silently ignored. * + Removed error messages while a disk drive is powered up at * boot time. * + Improved boot messages: all tagged capable device are * indicated as "tagged". * * 16 Sep 1999 Rev. 5.11 for linux 2.2.12 and 2.3.18 * + Updated to the new __setup interface for boot command line options. * + When loaded as a module, accepts the new parameter boot_options * which value is a string with the same format of the kernel boot * command line options. A valid example is: * modprobe u14-34f 'boot_options="0x230,0x340,lc:y,mq:4"' * * 22 Jul 1999 Rev. 5.00 for linux 2.2.10 and 2.3.11 * + Removed pre-2.2 source code compatibility. * * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111 * Added command line option (et:[y|n]) to use the existing * translation (returned by scsicam_bios_param) as disk geometry. * The default is et:n, which uses the disk geometry jumpered * on the board. * The default value et:n is compatible with all previous revisions * of this driver. * * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104 * Increased busy timeout from 10 msec. to 200 msec. while * processing interrupts. * * 18 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102 * Improved abort handling during the eh recovery process. * * 13 May 1998 Rev. 
4.30 for linux 2.0.33 and 2.1.101 * The driver is now fully SMP safe, including the * abort and reset routines. * Added command line options (eh:[y|n]) to choose between * new_eh_code and the old scsi code. * If linux version >= 2.1.101 the default is eh:y, while the eh * option is ignored for previous releases and the old scsi code * is used. * * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97 * Reworked interrupt handler. * * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95 * Major reliability improvement: when a batch with overlapping * requests is detected, requests are queued one at a time * eliminating any possible board or drive reordering. * * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95 * Improved SMP support (if linux version >= 2.1.95). * * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94 * Performance improvement: when sequential i/o is detected, * always use direct sort instead of reverse sort. * * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92 * io_port is now unsigned long. * * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88 * Use new scsi error handling code (if linux version >= 2.1.88). * Use new interrupt code. * * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55 * Use of udelay inside the wait loops to avoid timeout * problems with fast cpus. * Removed check about useless calls to the interrupt service * routine (reported on SMP systems only). * At initialization time "sorted/unsorted" is displayed instead * of "linked/unlinked" to reinforce the fact that "linking" is * nothing but "elevator sorting" in the actual implementation. * * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38 * Use of serial_number_at_timeout in abort and reset processing. * Use of the __initfunc and __initdata macro in setup code. * Minor cleanups in the list_statistics code. * * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26 * When loading as a module, parameter passing is now supported * both in 2.0 and in 2.1 style. 
* Fixed data transfer direction for some SCSI opcodes. * Immediate acknowledge to request sense commands. * Linked commands to each disk device are now reordered by elevator * sorting. Rare cases in which reordering of write requests could * cause wrong results are managed. * * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28 * Added command line options to enable/disable linked commands * (lc:[y|n]), old firmware support (of:[y|n]) and to set the max * queue depth (mq:xx). Default is "u14-34f=lc:n,of:n,mq:8". * Improved command linking. * * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27 * Added linked command support. * * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27 * Added queue depth adjustment. * * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26 * The list of i/o ports to be probed can be overwritten by the * "u14-34f=port0,port1,...." boot command line option. * Scatter/gather lists are now allocated by a number of kmalloc * calls, in order to avoid the previous size limit of 64Kb. * * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25 * Added multichannel support. * * 27 Sep 1996 rev. 2.12 for linux 2.1.0 * Portability cleanups (virtual/bus addressing, little/big endian * support). * * 09 Jul 1996 rev. 2.11 for linux 2.0.4 * "Data over/under-run" no longer implies a redo on all targets. * Number of internal retries is now limited. * * 16 Apr 1996 rev. 2.10 for linux 1.3.90 * New argument "reset_flags" to the reset routine. * * 21 Jul 1995 rev. 2.02 for linux 1.3.11 * Fixed Data Transfer Direction for some SCSI commands. * * 13 Jun 1995 rev. 2.01 for linux 1.2.10 * HAVE_OLD_UX4F_FIRMWARE should be defined for U34F boards when * the firmware prom is not the latest one (28008-006). * * 11 Mar 1995 rev. 2.00 for linux 1.2.0 * Fixed a bug which prevented media change detection for removable * disk drives. * * 23 Feb 1995 rev. 1.18 for linux 1.1.94 * Added a check for scsi_register returning NULL. * * 11 Feb 1995 rev. 
1.17 for linux 1.1.91 * U14F qualified to run with 32 sglists. * Now DEBUG_RESET is disabled by default. * * 9 Feb 1995 rev. 1.16 for linux 1.1.90 * Use host->wish_block instead of host->block. * * 8 Feb 1995 rev. 1.15 for linux 1.1.89 * Cleared target_time_out counter while performing a reset. * * 28 Jan 1995 rev. 1.14 for linux 1.1.86 * Added module support. * Log and do a retry when a disk drive returns a target status * different from zero on a recovered error. * Auto detects if U14F boards have an old firmware revision. * Max number of scatter/gather lists set to 16 for all boards * (most installation run fine using 33 sglists, while other * has problems when using more than 16). * * 16 Jan 1995 rev. 1.13 for linux 1.1.81 * Display a message if check_region detects a port address * already in use. * * 15 Dec 1994 rev. 1.12 for linux 1.1.74 * The host->block flag is set for all the detected ISA boards. * * 30 Nov 1994 rev. 1.11 for linux 1.1.68 * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only. * Added optional support for using a single board at a time. * * 14 Nov 1994 rev. 1.10 for linux 1.1.63 * * 28 Oct 1994 rev. 1.09 for linux 1.1.58 Final BETA release. * 16 Jul 1994 rev. 1.00 for linux 1.1.29 Initial ALPHA release. * * This driver is a total replacement of the original UltraStor * scsi driver, but it supports ONLY the 14F and 34F boards. * It can be configured in the same kernel in which the original * ultrastor driver is configured to allow the original U24F * support. * * Multiple U14F and/or U34F host adapters are supported. * * Copyright (C) 1994-2003 Dario Ballabio (ballabio_dario@emc.com) * * Alternate email: dario.ballabio@inwind.it, dario.ballabio@tiscalinet.it * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that redistributions of source * code retain the above copyright notice and this comment without * modification. 
* * WARNING: if your 14/34F board has an old firmware revision (see below) * you must change "#undef" into "#define" in the following * statement. */ #undef HAVE_OLD_UX4F_FIRMWARE /* * The UltraStor 14F, 24F, and 34F are a family of intelligent, high * performance SCSI-2 host adapters. * Here is the scoop on the various models: * * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation. * 24F - EISA Bus Master HA with floppy support and WD1003 emulation. * 34F - VESA Local-Bus Bus Master HA (no WD1003 emulation). * * This code has been tested with up to two U14F boards, using both * firmware 28004-005/38004-004 (BIOS rev. 2.00) and the latest firmware * 28004-006/38004-005 (BIOS rev. 2.01). * * The latest firmware is required in order to get reliable operations when * clustering is enabled. ENABLE_CLUSTERING provides a performance increase * up to 50% on sequential access. * * Since the struct scsi_host_template structure is shared among all 14F and 34F, * the last setting of use_clustering is in effect for all of these boards. * * Here a sample configuration using two U14F boards: * U14F0: ISA 0x330, BIOS 0xc8000, IRQ 11, DMA 5, SG 32, MB 16, of:n, lc:y, mq:8. U14F1: ISA 0x340, BIOS 0x00000, IRQ 10, DMA 6, SG 32, MB 16, of:n, lc:y, mq:8. * * The boot controller must have its BIOS enabled, while other boards can * have their BIOS disabled, or enabled to an higher address. * Boards are named Ux4F0, Ux4F1..., according to the port address order in * the io_port[] array. * * The following facts are based on real testing results (not on * documentation) on the above U14F board. * * - The U14F board should be jumpered for bus on time less or equal to 7 * microseconds, while the default is 11 microseconds. This is order to * get acceptable performance while using floppy drive and hard disk * together. The jumpering for 7 microseconds is: JP13 pin 15-16, * JP14 pin 7-8 and pin 9-10. * The reduction has a little impact on scsi performance. 
* * - If scsi bus length exceeds 3m., the scsi bus speed needs to be reduced * from 10Mhz to 5Mhz (do this by inserting a jumper on JP13 pin 7-8). * * - If U14F on board firmware is older than 28004-006/38004-005, * the U14F board is unable to provide reliable operations if the scsi * request length exceeds 16Kbyte. When this length is exceeded the * behavior is: * - adapter_status equal 0x96 or 0xa3 or 0x93 or 0x94; * - adapter_status equal 0 and target_status equal 2 on for all targets * in the next operation following the reset. * This sequence takes a long time (>3 seconds), so in the meantime * the SD_TIMEOUT in sd.c could expire giving rise to scsi aborts * (SD_TIMEOUT has been increased from 3 to 6 seconds in 1.1.31). * Because of this I had to DISABLE_CLUSTERING and to work around the * bus reset in the interrupt service routine, returning DID_BUS_BUSY * so that the operations are retried without complains from the scsi.c * code. * Any reset of the scsi bus is going to kill tape operations, since * no retry is allowed for tapes. Bus resets are more likely when the * scsi bus is under heavy load. * Requests using scatter/gather have a maximum length of 16 x 1024 bytes * when DISABLE_CLUSTERING is in effect, but unscattered requests could be * larger than 16Kbyte. * * The new firmware has fixed all the above problems. * * For U34F boards the latest bios prom is 38008-002 (BIOS rev. 2.01), * the latest firmware prom is 28008-006. Older firmware 28008-005 has * problems when using more than 16 scatter/gather lists. * * The list of i/o ports to be probed can be totally replaced by the * boot command line option: "u14-34f=port0,port1,port2,...", where the * port0, port1... arguments are ISA/VESA addresses to be probed. * For example using "u14-34f=0x230,0x340", the driver probes only the two * addresses 0x230 and 0x340 in this order; "u14-34f=0" totally disables * this driver. 
* * After the optional list of detection probes, other possible command line * options are: * * et:y use disk geometry returned by scsicam_bios_param; * et:n use disk geometry jumpered on the board; * lc:y enables linked commands; * lc:n disables linked commands; * tm:0 disables tagged commands (same as tc:n); * tm:1 use simple queue tags (same as tc:y); * tm:2 use ordered queue tags (same as tc:2); * of:y enables old firmware support; * of:n disables old firmware support; * mq:xx set the max queue depth to the value xx (2 <= xx <= 8). * * The default value is: "u14-34f=lc:n,of:n,mq:8,tm:0,et:n". * An example using the list of detection probes could be: * "u14-34f=0x230,0x340,lc:y,tm:2,of:n,mq:4,et:n". * * When loading as a module, parameters can be specified as well. * The above example would be (use 1 in place of y and 0 in place of n): * * modprobe u14-34f io_port=0x230,0x340 linked_comm=1 have_old_firmware=0 \ * max_queue_depth=4 ext_tran=0 tag_mode=2 * * ---------------------------------------------------------------------------- * In this implementation, linked commands are designed to work with any DISK * or CD-ROM, since this linking has only the intent of clustering (time-wise) * and reordering by elevator sorting commands directed to each device, * without any relation with the actual SCSI protocol between the controller * and the device. * If Q is the queue depth reported at boot time for each device (also named * cmds/lun) and Q > 2, whenever there is already an active command to the * device all other commands to the same device (up to Q-1) are kept waiting * in the elevator sorting queue. When the active command completes, the * commands in this queue are sorted by sector address. The sort is chosen * between increasing or decreasing by minimizing the seek distance between * the sector of the commands just completed and the sector of the first * command in the list to be sorted. 
* Trivial math assures that the unsorted average seek distance when doing * random seeks over S sectors is S/3. * When (Q-1) requests are uniformly distributed over S sectors, the average * distance between two adjacent requests is S/((Q-1) + 1), so the sorted * average seek distance for (Q-1) random requests over S sectors is S/Q. * The elevator sorting hence divides the seek distance by a factor Q/3. * The above pure geometric remarks are valid in all cases and the * driver effectively reduces the seek distance by the predicted factor * when there are Q concurrent read i/o operations on the device, but this * does not necessarily results in a noticeable performance improvement: * your mileage may vary.... * * Note: command reordering inside a batch of queued commands could cause * wrong results only if there is at least one write request and the * intersection (sector-wise) of all requests is not empty. * When the driver detects a batch including overlapping requests * (a really rare event) strict serial (pid) order is enforced. * ---------------------------------------------------------------------------- * * The boards are named Ux4F0, Ux4F1,... according to the detection order. * * In order to support multiple ISA boards in a reliable way, * the driver sets host->wish_block = TRUE for all ISA boards. 
*/ #include <linux/string.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/system.h> #include <asm/byteorder.h> #include <linux/proc_fs.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ctype.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <asm/dma.h> #include <asm/irq.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> static int u14_34f_detect(struct scsi_host_template *); static int u14_34f_release(struct Scsi_Host *); static int u14_34f_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); static int u14_34f_eh_abort(struct scsi_cmnd *); static int u14_34f_eh_host_reset(struct scsi_cmnd *); static int u14_34f_bios_param(struct scsi_device *, struct block_device *, sector_t, int *); static int u14_34f_slave_configure(struct scsi_device *); static struct scsi_host_template driver_template = { .name = "UltraStor 14F/34F rev. 
8.10.00 ", .detect = u14_34f_detect, .release = u14_34f_release, .queuecommand = u14_34f_queuecommand, .eh_abort_handler = u14_34f_eh_abort, .eh_host_reset_handler = u14_34f_eh_host_reset, .bios_param = u14_34f_bios_param, .slave_configure = u14_34f_slave_configure, .this_id = 7, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, }; #if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD) #error "Adjust your <asm/byteorder.h> defines" #endif /* Values for the PRODUCT_ID ports for the 14/34F */ #define PRODUCT_ID1 0x56 #define PRODUCT_ID2 0x40 /* NOTE: Only upper nibble is used */ /* Subversion values */ #define ISA 0 #define ESA 1 #define OP_HOST_ADAPTER 0x1 #define OP_SCSI 0x2 #define OP_RESET 0x4 #define DTD_SCSI 0x0 #define DTD_IN 0x1 #define DTD_OUT 0x2 #define DTD_NONE 0x3 #define HA_CMD_INQUIRY 0x1 #define HA_CMD_SELF_DIAG 0x2 #define HA_CMD_READ_BUFF 0x3 #define HA_CMD_WRITE_BUFF 0x4 #undef DEBUG_LINKED_COMMANDS #undef DEBUG_DETECT #undef DEBUG_INTERRUPT #undef DEBUG_RESET #undef DEBUG_GENERATE_ERRORS #undef DEBUG_GENERATE_ABORTS #undef DEBUG_GEOMETRY #define MAX_ISA 3 #define MAX_VESA 1 #define MAX_EISA 0 #define MAX_PCI 0 #define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI) #define MAX_CHANNEL 1 #define MAX_LUN 8 #define MAX_TARGET 8 #define MAX_MAILBOXES 16 #define MAX_SGLIST 32 #define MAX_SAFE_SGLIST 16 #define MAX_INTERNAL_RETRIES 64 #define MAX_CMD_PER_LUN 2 #define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN) #define SKIP ULONG_MAX #define FALSE 0 #define TRUE 1 #define FREE 0 #define IN_USE 1 #define LOCKED 2 #define IN_RESET 3 #define IGNORE 4 #define READY 5 #define ABORTING 6 #define NO_DMA 0xff #define MAXLOOP 10000 #define TAG_DISABLED 0 #define TAG_SIMPLE 1 #define TAG_ORDERED 2 #define REG_LCL_MASK 0 #define REG_LCL_INTR 1 #define REG_SYS_MASK 2 #define REG_SYS_INTR 3 #define REG_PRODUCT_ID1 4 #define REG_PRODUCT_ID2 5 #define REG_CONFIG1 6 #define REG_CONFIG2 7 #define REG_OGM 8 #define REG_ICM 
12 #define REGION_SIZE 13UL #define BSY_ASSERTED 0x01 #define IRQ_ASSERTED 0x01 #define CMD_RESET 0xc0 #define CMD_OGM_INTR 0x01 #define CMD_CLR_INTR 0x01 #define CMD_ENA_INTR 0x81 #define ASOK 0x00 #define ASST 0x91 #define YESNO(a) ((a) ? 'y' : 'n') #define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM) #define PACKED __attribute__((packed)) struct sg_list { unsigned int address; /* Segment Address */ unsigned int num_bytes; /* Segment Length */ }; /* MailBox SCSI Command Packet */ struct mscp { #if defined(__BIG_ENDIAN_BITFIELD) unsigned char sg:1, ca:1, dcn:1, xdir:2, opcode:3; unsigned char lun: 3, channel:2, target:3; #else unsigned char opcode: 3, /* type of command */ xdir: 2, /* data transfer direction */ dcn: 1, /* disable disconnect */ ca: 1, /* use cache (if available) */ sg: 1; /* scatter/gather operation */ unsigned char target: 3, /* SCSI target id */ channel: 2, /* SCSI channel number */ lun: 3; /* SCSI logical unit number */ #endif unsigned int data_address PACKED; /* transfer data pointer */ unsigned int data_len PACKED; /* length in bytes */ unsigned int link_address PACKED; /* for linking command chains */ unsigned char clink_id; /* identifies command in chain */ unsigned char use_sg; /* (if sg is set) 8 bytes per list */ unsigned char sense_len; unsigned char cdb_len; /* 6, 10, or 12 */ unsigned char cdb[12]; /* SCSI Command Descriptor Block */ unsigned char adapter_status; /* non-zero indicates HA error */ unsigned char target_status; /* non-zero indicates target error */ unsigned int sense_addr PACKED; /* Additional fields begin here. 
*/ struct scsi_cmnd *SCpnt; unsigned int cpp_index; /* cp index */ /* All the cp structure is zero filled by queuecommand except the following CP_TAIL_SIZE bytes, initialized by detect */ dma_addr_t cp_dma_addr; /* dma handle for this cp structure */ struct sg_list *sglist; /* pointer to the allocated SG list */ }; #define CP_TAIL_SIZE (sizeof(struct sglist *) + sizeof(dma_addr_t)) struct hostdata { struct mscp cp[MAX_MAILBOXES]; /* Mailboxes for this board */ unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */ unsigned int last_cp_used; /* Index of last mailbox used */ unsigned int iocount; /* Total i/o done for this board */ int board_number; /* Number of this board */ char board_name[16]; /* Name of this board */ int in_reset; /* True if board is doing a reset */ int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */ int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */ unsigned int retries; /* Number of internal retries */ unsigned long last_retried_pid; /* Pid of last retried command */ unsigned char subversion; /* Bus type, either ISA or ESA */ struct pci_dev *pdev; /* Always NULL */ unsigned char heads; unsigned char sectors; char board_id[256]; /* data from INQUIRY on this board */ }; static struct Scsi_Host *sh[MAX_BOARDS + 1]; static const char *driver_name = "Ux4F"; static char sha[MAX_BOARDS]; static DEFINE_SPINLOCK(driver_lock); /* Initialize num_boards so that ihdlr can work while detect is in progress */ static unsigned int num_boards = MAX_BOARDS; static unsigned long io_port[] = { /* Space for MAX_INT_PARAM ports usable while loading as a module */ SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, /* Possible ISA/VESA ports */ 0x330, 0x340, 0x230, 0x240, 0x210, 0x130, 0x140, /* End of list */ 0x0 }; #define HD(board) ((struct hostdata *) &sh[board]->hostdata) #define BN(board) (HD(board)->board_name) /* Device is Little Endian */ #define H2DEV(x) cpu_to_le32(x) #define 
DEV2H(x) le32_to_cpu(x) static irqreturn_t do_interrupt_handler(int, void *); static void flush_dev(struct scsi_device *, unsigned long, unsigned int, unsigned int); static int do_trace = FALSE; static int setup_done = FALSE; static int link_statistics; static int ext_tran = FALSE; #if defined(HAVE_OLD_UX4F_FIRMWARE) static int have_old_firmware = TRUE; #else static int have_old_firmware = FALSE; #endif #if defined(CONFIG_SCSI_U14_34F_TAGGED_QUEUE) static int tag_mode = TAG_SIMPLE; #else static int tag_mode = TAG_DISABLED; #endif #if defined(CONFIG_SCSI_U14_34F_LINKED_COMMANDS) static int linked_comm = TRUE; #else static int linked_comm = FALSE; #endif #if defined(CONFIG_SCSI_U14_34F_MAX_TAGS) static int max_queue_depth = CONFIG_SCSI_U14_34F_MAX_TAGS; #else static int max_queue_depth = MAX_CMD_PER_LUN; #endif #define MAX_INT_PARAM 10 #define MAX_BOOT_OPTIONS_SIZE 256 static char boot_options[MAX_BOOT_OPTIONS_SIZE]; #if defined(MODULE) #include <linux/module.h> #include <linux/moduleparam.h> module_param_string(u14_34f, boot_options, MAX_BOOT_OPTIONS_SIZE, 0); MODULE_PARM_DESC(u14_34f, " equivalent to the \"u14-34f=...\" kernel boot " \ "option." 
\ " Example: modprobe u14-34f \"u14_34f=0x340,0x330,lc:y,tm:0,mq:4\""); MODULE_AUTHOR("Dario Ballabio"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("UltraStor 14F/34F SCSI Driver"); #endif static int u14_34f_slave_configure(struct scsi_device *dev) { int j, tqd, utqd; char *tag_suffix, *link_suffix; struct Scsi_Host *host = dev->host; j = ((struct hostdata *) host->hostdata)->board_number; utqd = MAX_CMD_PER_LUN; tqd = max_queue_depth; if (TLDEV(dev->type) && dev->tagged_supported) if (tag_mode == TAG_SIMPLE) { scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, tqd); tag_suffix = ", simple tags"; } else if (tag_mode == TAG_ORDERED) { scsi_adjust_queue_depth(dev, MSG_ORDERED_TAG, tqd); tag_suffix = ", ordered tags"; } else { scsi_adjust_queue_depth(dev, 0, tqd); tag_suffix = ", no tags"; } else if (TLDEV(dev->type) && linked_comm) { scsi_adjust_queue_depth(dev, 0, tqd); tag_suffix = ", untagged"; } else { scsi_adjust_queue_depth(dev, 0, utqd); tag_suffix = ""; } if (TLDEV(dev->type) && linked_comm && dev->queue_depth > 2) link_suffix = ", sorted"; else if (TLDEV(dev->type)) link_suffix = ", unsorted"; else link_suffix = ""; sdev_printk(KERN_INFO, dev, "cmds/lun %d%s%s.\n", dev->queue_depth, link_suffix, tag_suffix); return FALSE; } static int wait_on_busy(unsigned long iobase, unsigned int loop) { while (inb(iobase + REG_LCL_INTR) & BSY_ASSERTED) { udelay(1L); if (--loop == 0) return TRUE; } return FALSE; } static int board_inquiry(unsigned int j) { struct mscp *cpp; dma_addr_t id_dma_addr; unsigned int limit = 0; unsigned long time; id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id, sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL); cpp = &HD(j)->cp[0]; cpp->cp_dma_addr = pci_map_single(HD(j)->pdev, cpp, sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL); memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE); cpp->opcode = OP_HOST_ADAPTER; cpp->xdir = DTD_IN; cpp->data_address = H2DEV(id_dma_addr); cpp->data_len = H2DEV(sizeof(HD(j)->board_id)); cpp->cdb_len = 6; cpp->cdb[0] 
= HA_CMD_INQUIRY; if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { printk("%s: board_inquiry, adapter busy.\n", BN(j)); return TRUE; } HD(j)->cp_stat[0] = IGNORE; /* Clear the interrupt indication */ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); /* Store pointer in OGM address bytes */ outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); /* Issue OGM interrupt */ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); spin_unlock_irq(&driver_lock); time = jiffies; while ((jiffies - time) < HZ && limit++ < 20000) udelay(100L); spin_lock_irq(&driver_lock); if (cpp->adapter_status || HD(j)->cp_stat[0] != FREE) { HD(j)->cp_stat[0] = FREE; printk("%s: board_inquiry, err 0x%x.\n", BN(j), cpp->adapter_status); return TRUE; } pci_unmap_single(HD(j)->pdev, cpp->cp_dma_addr, sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL); pci_unmap_single(HD(j)->pdev, id_dma_addr, sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL); return FALSE; } static int port_detect \ (unsigned long port_base, unsigned int j, struct scsi_host_template *tpnt) { unsigned char irq, dma_channel, subversion, i; unsigned char in_byte; char *bus_type, dma_name[16]; /* Allowed BIOS base addresses (NULL indicates reserved) */ unsigned long bios_segment_table[8] = { 0, 0xc4000, 0xc8000, 0xcc000, 0xd0000, 0xd4000, 0xd8000, 0xdc000 }; /* Allowed IRQs */ unsigned char interrupt_table[4] = { 15, 14, 11, 10 }; /* Allowed DMA channels for ISA (0 indicates reserved) */ unsigned char dma_channel_table[4] = { 5, 6, 7, 0 }; /* Head/sector mappings */ struct { unsigned char heads; unsigned char sectors; } mapping_table[4] = { { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 } }; struct config_1 { #if defined(__BIG_ENDIAN_BITFIELD) unsigned char dma_channel: 2, interrupt:2, removable_disks_as_fixed:1, bios_segment: 3; #else unsigned char bios_segment: 3, removable_disks_as_fixed: 1, interrupt: 2, dma_channel: 2; #endif } config_1; struct config_2 { #if defined(__BIG_ENDIAN_BITFIELD) unsigned char tfr_port: 2, bios_drive_number: 1, 
mapping_mode: 2, ha_scsi_id: 3; #else unsigned char ha_scsi_id: 3, mapping_mode: 2, bios_drive_number: 1, tfr_port: 2; #endif } config_2; char name[16]; sprintf(name, "%s%d", driver_name, j); if (!request_region(port_base, REGION_SIZE, driver_name)) { #if defined(DEBUG_DETECT) printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base); #endif goto fail; } spin_lock_irq(&driver_lock); if (inb(port_base + REG_PRODUCT_ID1) != PRODUCT_ID1) goto freelock; in_byte = inb(port_base + REG_PRODUCT_ID2); if ((in_byte & 0xf0) != PRODUCT_ID2) goto freelock; *(char *)&config_1 = inb(port_base + REG_CONFIG1); *(char *)&config_2 = inb(port_base + REG_CONFIG2); irq = interrupt_table[config_1.interrupt]; dma_channel = dma_channel_table[config_1.dma_channel]; subversion = (in_byte & 0x0f); /* Board detected, allocate its IRQ */ if (request_irq(irq, do_interrupt_handler, IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0), driver_name, (void *) &sha[j])) { printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq); goto freelock; } if (subversion == ISA && request_dma(dma_channel, driver_name)) { printk("%s: unable to allocate DMA channel %u, detaching.\n", name, dma_channel); goto freeirq; } if (have_old_firmware) tpnt->use_clustering = DISABLE_CLUSTERING; spin_unlock_irq(&driver_lock); sh[j] = scsi_register(tpnt, sizeof(struct hostdata)); spin_lock_irq(&driver_lock); if (sh[j] == NULL) { printk("%s: unable to register host, detaching.\n", name); goto freedma; } sh[j]->io_port = port_base; sh[j]->unique_id = port_base; sh[j]->n_io_port = REGION_SIZE; sh[j]->base = bios_segment_table[config_1.bios_segment]; sh[j]->irq = irq; sh[j]->sg_tablesize = MAX_SGLIST; sh[j]->this_id = config_2.ha_scsi_id; sh[j]->can_queue = MAX_MAILBOXES; sh[j]->cmd_per_lun = MAX_CMD_PER_LUN; #if defined(DEBUG_DETECT) { unsigned char sys_mask, lcl_mask; sys_mask = inb(sh[j]->io_port + REG_SYS_MASK); lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK); printk("SYS_MASK 0x%x, LCL_MASK 0x%x.\n", 
sys_mask, lcl_mask); } #endif /* Probably a bogus host scsi id, set it to the dummy value */ if (sh[j]->this_id == 0) sh[j]->this_id = -1; /* If BIOS is disabled, force enable interrupts */ if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK); memset(HD(j), 0, sizeof(struct hostdata)); HD(j)->heads = mapping_table[config_2.mapping_mode].heads; HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors; HD(j)->subversion = subversion; HD(j)->pdev = NULL; HD(j)->board_number = j; if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST; if (HD(j)->subversion == ESA) { sh[j]->unchecked_isa_dma = FALSE; sh[j]->dma_channel = NO_DMA; sprintf(BN(j), "U34F%d", j); bus_type = "VESA"; } else { unsigned long flags; sh[j]->unchecked_isa_dma = TRUE; flags=claim_dma_lock(); disable_dma(dma_channel); clear_dma_ff(dma_channel); set_dma_mode(dma_channel, DMA_MODE_CASCADE); enable_dma(dma_channel); release_dma_lock(flags); sh[j]->dma_channel = dma_channel; sprintf(BN(j), "U14F%d", j); bus_type = "ISA"; } sh[j]->max_channel = MAX_CHANNEL - 1; sh[j]->max_id = MAX_TARGET; sh[j]->max_lun = MAX_LUN; if (HD(j)->subversion == ISA && !board_inquiry(j)) { HD(j)->board_id[40] = 0; if (strcmp(&HD(j)->board_id[32], "06000600")) { printk("%s: %s.\n", BN(j), &HD(j)->board_id[8]); printk("%s: firmware %s is outdated, FW PROM should be 28004-006.\n", BN(j), &HD(j)->board_id[32]); sh[j]->hostt->use_clustering = DISABLE_CLUSTERING; sh[j]->sg_tablesize = MAX_SAFE_SGLIST; } } if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST"); else sprintf(dma_name, "DMA %u", dma_channel); spin_unlock_irq(&driver_lock); for (i = 0; i < sh[j]->can_queue; i++) HD(j)->cp[i].cp_dma_addr = pci_map_single(HD(j)->pdev, &HD(j)->cp[i], sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL); for (i = 0; i < sh[j]->can_queue; i++) if (! ((&HD(j)->cp[i])->sglist = kmalloc( sh[j]->sg_tablesize * sizeof(struct sg_list), (sh[j]->unchecked_isa_dma ? 
GFP_DMA : 0) | GFP_ATOMIC))) { printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i); goto release; } if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN) max_queue_depth = MAX_TAGGED_CMD_PER_LUN; if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN; if (tag_mode != TAG_DISABLED && tag_mode != TAG_SIMPLE) tag_mode = TAG_ORDERED; if (j == 0) { printk("UltraStor 14F/34F: Copyright (C) 1994-2003 Dario Ballabio.\n"); printk("%s config options -> of:%c, tm:%d, lc:%c, mq:%d, et:%c.\n", driver_name, YESNO(have_old_firmware), tag_mode, YESNO(linked_comm), max_queue_depth, YESNO(ext_tran)); } printk("%s: %s 0x%03lx, BIOS 0x%05x, IRQ %u, %s, SG %d, MB %d.\n", BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base, sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue); if (sh[j]->max_id > 8 || sh[j]->max_lun > 8) printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n", BN(j), sh[j]->max_id, sh[j]->max_lun); for (i = 0; i <= sh[j]->max_channel; i++) printk("%s: SCSI channel %u enabled, host target ID %d.\n", BN(j), i, sh[j]->this_id); return TRUE; freedma: if (subversion == ISA) free_dma(dma_channel); freeirq: free_irq(irq, &sha[j]); freelock: spin_unlock_irq(&driver_lock); release_region(port_base, REGION_SIZE); fail: return FALSE; release: u14_34f_release(sh[j]); return FALSE; } static void internal_setup(char *str, int *ints) { int i, argc = ints[0]; char *cur = str, *pc; if (argc > 0) { if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM; for (i = 0; i < argc; i++) io_port[i] = ints[i + 1]; io_port[i] = 0; setup_done = TRUE; } while (cur && (pc = strchr(cur, ':'))) { int val = 0, c = *++pc; if (c == 'n' || c == 'N') val = FALSE; else if (c == 'y' || c == 'Y') val = TRUE; else val = (int) simple_strtoul(pc, NULL, 0); if (!strncmp(cur, "lc:", 3)) linked_comm = val; else if (!strncmp(cur, "of:", 3)) have_old_firmware = val; else if (!strncmp(cur, "tm:", 3)) tag_mode = val; else if (!strncmp(cur, "tc:", 3)) tag_mode = val; else 
if (!strncmp(cur, "mq:", 3)) max_queue_depth = val; else if (!strncmp(cur, "ls:", 3)) link_statistics = val; else if (!strncmp(cur, "et:", 3)) ext_tran = val; if ((cur = strchr(cur, ','))) ++cur; } return; } static int option_setup(char *str) { int ints[MAX_INT_PARAM]; char *cur = str; int i = 1; while (cur && isdigit(*cur) && i < MAX_INT_PARAM) { ints[i++] = simple_strtoul(cur, NULL, 0); if ((cur = strchr(cur, ',')) != NULL) cur++; } ints[0] = i - 1; internal_setup(cur, ints); return 1; } static int u14_34f_detect(struct scsi_host_template *tpnt) { unsigned int j = 0, k; tpnt->proc_name = "u14-34f"; if(strlen(boot_options)) option_setup(boot_options); #if defined(MODULE) /* io_port could have been modified when loading as a module */ if(io_port[0] != SKIP) { setup_done = TRUE; io_port[MAX_INT_PARAM] = 0; } #endif for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL; for (k = 0; io_port[k]; k++) { if (io_port[k] == SKIP) continue; if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++; } num_boards = j; return j; } static void map_dma(unsigned int i, unsigned int j) { unsigned int data_len = 0; unsigned int k, pci_dir; int count; struct scatterlist *sg; struct mscp *cpp; struct scsi_cmnd *SCpnt; cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt; pci_dir = SCpnt->sc_data_direction; if (SCpnt->sense_buffer) cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer, SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE)); cpp->sense_len = SCSI_SENSE_BUFFERSIZE; if (scsi_bufflen(SCpnt)) { count = scsi_dma_map(SCpnt); BUG_ON(count < 0); scsi_for_each_sg(SCpnt, sg, count, k) { cpp->sglist[k].address = H2DEV(sg_dma_address(sg)); cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg)); data_len += sg->length; } cpp->sg = TRUE; cpp->use_sg = scsi_sg_count(SCpnt); cpp->data_address = H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist, cpp->use_sg * sizeof(struct sg_list), pci_dir)); cpp->data_len = H2DEV(data_len); } else { pci_dir = PCI_DMA_BIDIRECTIONAL; cpp->data_len = 
H2DEV(scsi_bufflen(SCpnt)); } } static void unmap_dma(unsigned int i, unsigned int j) { unsigned int pci_dir; struct mscp *cpp; struct scsi_cmnd *SCpnt; cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt; pci_dir = SCpnt->sc_data_direction; if (DEV2H(cpp->sense_addr)) pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr), DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE); scsi_dma_unmap(SCpnt); if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL; if (DEV2H(cpp->data_address)) pci_unmap_single(HD(j)->pdev, DEV2H(cpp->data_address), DEV2H(cpp->data_len), pci_dir); } static void sync_dma(unsigned int i, unsigned int j) { unsigned int pci_dir; struct mscp *cpp; struct scsi_cmnd *SCpnt; cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt; pci_dir = SCpnt->sc_data_direction; if (DEV2H(cpp->sense_addr)) pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr), DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE); if (scsi_sg_count(SCpnt)) pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt), scsi_sg_count(SCpnt), pci_dir); if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL; if (DEV2H(cpp->data_address)) pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->data_address), DEV2H(cpp->data_len), pci_dir); } static void scsi_to_dev_dir(unsigned int i, unsigned int j) { unsigned int k; static const unsigned char data_out_cmds[] = { 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e, 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40, 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b, 0x5d }; static const unsigned char data_none_cmds[] = { 0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e, 0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47, 0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5, 0x00 }; struct mscp *cpp; struct scsi_cmnd *SCpnt; cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt; if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) { cpp->xdir = DTD_IN; return; } else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) { cpp->xdir = DTD_OUT; return; } else if 
(SCpnt->sc_data_direction == DMA_NONE) { cpp->xdir = DTD_NONE; return; } if (SCpnt->sc_data_direction != DMA_BIDIRECTIONAL) panic("%s: qcomm, invalid SCpnt->sc_data_direction.\n", BN(j)); cpp->xdir = DTD_IN; for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++) if (SCpnt->cmnd[0] == data_out_cmds[k]) { cpp->xdir = DTD_OUT; break; } if (cpp->xdir == DTD_IN) for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++) if (SCpnt->cmnd[0] == data_none_cmds[k]) { cpp->xdir = DTD_NONE; break; } } static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { unsigned int i, j, k; struct mscp *cpp; /* j is the board number */ j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number; if (SCpnt->host_scribble) panic("%s: qcomm, pid %ld, SCpnt %p already active.\n", BN(j), SCpnt->serial_number, SCpnt); /* i is the mailbox number, look for the first free mailbox starting from last_cp_used */ i = HD(j)->last_cp_used + 1; for (k = 0; k < sh[j]->can_queue; k++, i++) { if (i >= sh[j]->can_queue) i = 0; if (HD(j)->cp_stat[i] == FREE) { HD(j)->last_cp_used = i; break; } } if (k == sh[j]->can_queue) { printk("%s: qcomm, no free mailbox.\n", BN(j)); return 1; } /* Set pointer to control packet structure */ cpp = &HD(j)->cp[i]; memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE); SCpnt->scsi_done = done; cpp->cpp_index = i; SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index; if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d, pid %ld.\n", BN(j), i, SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun, SCpnt->serial_number); cpp->opcode = OP_SCSI; cpp->channel = SCpnt->device->channel; cpp->target = SCpnt->device->id; cpp->lun = SCpnt->device->lun; cpp->SCpnt = SCpnt; cpp->cdb_len = SCpnt->cmd_len; memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len); /* Use data transfer direction SCpnt->sc_data_direction */ scsi_to_dev_dir(i, j); /* Map DMA buffers and SG list */ map_dma(i, j); if (linked_comm && SCpnt->device->queue_depth > 2 && 
TLDEV(SCpnt->device->type)) { HD(j)->cp_stat[i] = READY; flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE); return 0; } if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { unmap_dma(i, j); SCpnt->host_scribble = NULL; scmd_printk(KERN_INFO, SCpnt, "qcomm, pid %ld, adapter busy.\n", SCpnt->serial_number); return 1; } /* Store pointer in OGM address bytes */ outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); /* Issue OGM interrupt */ outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); HD(j)->cp_stat[i] = IN_USE; return 0; } static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) { unsigned int i, j; j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number; if (SCarg->host_scribble == NULL) { scmd_printk(KERN_INFO, SCarg, "abort, pid %ld inactive.\n", SCarg->serial_number); return SUCCESS; } i = *(unsigned int *)SCarg->host_scribble; scmd_printk(KERN_INFO, SCarg, "abort, mbox %d, pid %ld.\n", i, SCarg->serial_number); if (i >= sh[j]->can_queue) panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j)); if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { printk("%s: abort, timeout error.\n", BN(j)); return FAILED; } if (HD(j)->cp_stat[i] == FREE) { printk("%s: abort, mbox %d is free.\n", BN(j), i); return SUCCESS; } if (HD(j)->cp_stat[i] == IN_USE) { printk("%s: abort, mbox %d is in use.\n", BN(j), i); if (SCarg != HD(j)->cp[i].SCpnt) panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n", BN(j), i, SCarg, HD(j)->cp[i].SCpnt); if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED) printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i); return FAILED; } if (HD(j)->cp_stat[i] == IN_RESET) { printk("%s: abort, mbox %d is in reset.\n", BN(j), i); return FAILED; } if (HD(j)->cp_stat[i] == LOCKED) { printk("%s: abort, mbox %d is locked.\n", BN(j), i); return SUCCESS; } if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) { unmap_dma(i, j); SCarg->result = DID_ABORT << 16; SCarg->host_scribble = NULL; HD(j)->cp_stat[i] = FREE; printk("%s, abort, 
mbox %d ready, DID_ABORT, pid %ld done.\n", BN(j), i, SCarg->serial_number); SCarg->scsi_done(SCarg); return SUCCESS; } panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i); } static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) { unsigned int i, j, k, c, limit = 0; unsigned long time; int arg_done = FALSE; struct scsi_cmnd *SCpnt; j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number; scmd_printk(KERN_INFO, SCarg, "reset, enter, pid %ld.\n", SCarg->serial_number); spin_lock_irq(sh[j]->host_lock); if (SCarg->host_scribble == NULL) printk("%s: reset, pid %ld inactive.\n", BN(j), SCarg->serial_number); if (HD(j)->in_reset) { printk("%s: reset, exit, already in reset.\n", BN(j)); spin_unlock_irq(sh[j]->host_lock); return FAILED; } if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { printk("%s: reset, exit, timeout error.\n", BN(j)); spin_unlock_irq(sh[j]->host_lock); return FAILED; } HD(j)->retries = 0; for (c = 0; c <= sh[j]->max_channel; c++) for (k = 0; k < sh[j]->max_id; k++) { HD(j)->target_redo[k][c] = TRUE; HD(j)->target_to[k][c] = 0; } for (i = 0; i < sh[j]->can_queue; i++) { if (HD(j)->cp_stat[i] == FREE) continue; if (HD(j)->cp_stat[i] == LOCKED) { HD(j)->cp_stat[i] = FREE; printk("%s: reset, locked mbox %d forced free.\n", BN(j), i); continue; } if (!(SCpnt = HD(j)->cp[i].SCpnt)) panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i); if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) { HD(j)->cp_stat[i] = ABORTING; printk("%s: reset, mbox %d aborting, pid %ld.\n", BN(j), i, SCpnt->serial_number); } else { HD(j)->cp_stat[i] = IN_RESET; printk("%s: reset, mbox %d in reset, pid %ld.\n", BN(j), i, SCpnt->serial_number); } if (SCpnt->host_scribble == NULL) panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i); if (*(unsigned int *)SCpnt->host_scribble != i) panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i); if (SCpnt->scsi_done == NULL) panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i); if (SCpnt == 
SCarg) arg_done = TRUE; } if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { printk("%s: reset, cannot reset, timeout error.\n", BN(j)); spin_unlock_irq(sh[j]->host_lock); return FAILED; } outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR); printk("%s: reset, board reset done, enabling interrupts.\n", BN(j)); #if defined(DEBUG_RESET) do_trace = TRUE; #endif HD(j)->in_reset = TRUE; spin_unlock_irq(sh[j]->host_lock); time = jiffies; while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L); spin_lock_irq(sh[j]->host_lock); printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit); for (i = 0; i < sh[j]->can_queue; i++) { if (HD(j)->cp_stat[i] == IN_RESET) { SCpnt = HD(j)->cp[i].SCpnt; unmap_dma(i, j); SCpnt->result = DID_RESET << 16; SCpnt->host_scribble = NULL; /* This mailbox is still waiting for its interrupt */ HD(j)->cp_stat[i] = LOCKED; printk("%s, reset, mbox %d locked, DID_RESET, pid %ld done.\n", BN(j), i, SCpnt->serial_number); } else if (HD(j)->cp_stat[i] == ABORTING) { SCpnt = HD(j)->cp[i].SCpnt; unmap_dma(i, j); SCpnt->result = DID_RESET << 16; SCpnt->host_scribble = NULL; /* This mailbox was never queued to the adapter */ HD(j)->cp_stat[i] = FREE; printk("%s, reset, mbox %d aborting, DID_RESET, pid %ld done.\n", BN(j), i, SCpnt->serial_number); } else /* Any other mailbox has already been set free by interrupt */ continue; SCpnt->scsi_done(SCpnt); } HD(j)->in_reset = FALSE; do_trace = FALSE; if (arg_done) printk("%s: reset, exit, pid %ld done.\n", BN(j), SCarg->serial_number); else printk("%s: reset, exit.\n", BN(j)); spin_unlock_irq(sh[j]->host_lock); return SUCCESS; } static int u14_34f_bios_param(struct scsi_device *disk, struct block_device *bdev, sector_t capacity, int *dkinfo) { unsigned int j = 0; unsigned int size = capacity; dkinfo[0] = HD(j)->heads; dkinfo[1] = HD(j)->sectors; dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors); if (ext_tran && (scsicam_bios_param(bdev, capacity, dkinfo) < 0)) { dkinfo[0] = 255; dkinfo[1] = 63; 
dkinfo[2] = size / (dkinfo[0] * dkinfo[1]); } #if defined (DEBUG_GEOMETRY) printk ("%s: bios_param, head=%d, sec=%d, cyl=%d.\n", driver_name, dkinfo[0], dkinfo[1], dkinfo[2]); #endif return FALSE; } static void sort(unsigned long sk[], unsigned int da[], unsigned int n, unsigned int rev) { unsigned int i, j, k, y; unsigned long x; for (i = 0; i < n - 1; i++) { k = i; for (j = k + 1; j < n; j++) if (rev) { if (sk[j] > sk[k]) k = j; } else { if (sk[j] < sk[k]) k = j; } if (k != i) { x = sk[k]; sk[k] = sk[i]; sk[i] = x; y = da[k]; da[k] = da[i]; da[i] = y; } } return; } static int reorder(unsigned int j, unsigned long cursec, unsigned int ihdlr, unsigned int il[], unsigned int n_ready) { struct scsi_cmnd *SCpnt; struct mscp *cpp; unsigned int k, n; unsigned int rev = FALSE, s = TRUE, r = TRUE; unsigned int input_only = TRUE, overlap = FALSE; unsigned long sl[n_ready], pl[n_ready], ll[n_ready]; unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0; unsigned long ioseek = 0; static unsigned int flushcount = 0, batchcount = 0, sortcount = 0; static unsigned int readycount = 0, ovlcount = 0, inputcount = 0; static unsigned int readysorted = 0, revcount = 0; static unsigned long seeksorted = 0, seeknosort = 0; if (link_statistics && !(++flushcount % link_statistics)) printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\ " av %ldK as %ldK.\n", flushcount, batchcount, inputcount, ovlcount, readycount, readysorted, sortcount, revcount, seeknosort / (readycount + 1), seeksorted / (readycount + 1)); if (n_ready <= 1) return FALSE; for (n = 0; n < n_ready; n++) { k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; if (!(cpp->xdir == DTD_IN)) input_only = FALSE; if (blk_rq_pos(SCpnt->request) < minsec) minsec = blk_rq_pos(SCpnt->request); if (blk_rq_pos(SCpnt->request) > maxsec) maxsec = blk_rq_pos(SCpnt->request); sl[n] = blk_rq_pos(SCpnt->request); ioseek += blk_rq_sectors(SCpnt->request); if (!n) continue; if (sl[n] < sl[n - 1]) s = FALSE; if (sl[n] > sl[n - 
1]) r = FALSE; if (link_statistics) { if (sl[n] > sl[n - 1]) seek += sl[n] - sl[n - 1]; else seek += sl[n - 1] - sl[n]; } } if (link_statistics) { if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec; } if (cursec > ((maxsec + minsec) / 2)) rev = TRUE; if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE; if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev); if (!input_only) for (n = 0; n < n_ready; n++) { k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number; if (!n) continue; if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n])) || (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE; } if (overlap) sort(pl, il, n_ready, FALSE); if (link_statistics) { if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec; batchcount++; readycount += n_ready; seeknosort += seek / 1024; if (input_only) inputcount++; if (overlap) { ovlcount++; seeksorted += iseek / 1024; } else seeksorted += (iseek + maxsec - minsec) / 1024; if (rev && !r) { revcount++; readysorted += n_ready; } if (!rev && !s) { sortcount++; readysorted += n_ready; } } #if defined(DEBUG_LINKED_COMMANDS) if (link_statistics && (overlap || !(flushcount % link_statistics))) for (n = 0; n < n_ready; n++) { k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\ " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", (ihdlr ? 
"ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request), cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), YESNO(overlap), cpp->xdir); } #endif return overlap; } static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned int j, unsigned int ihdlr) { struct scsi_cmnd *SCpnt; struct mscp *cpp; unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES]; for (k = 0; k < sh[j]->can_queue; k++) { if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; if (SCpnt->device != dev) continue; if (HD(j)->cp_stat[k] == IN_USE) return; il[n_ready++] = k; } if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1; for (n = 0; n < n_ready; n++) { k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; if (wait_on_busy(sh[j]->io_port, MAXLOOP)) { scmd_printk(KERN_INFO, SCpnt, "%s, pid %ld, mbox %d, adapter" " busy, will abort.\n", (ihdlr ? 
"ihdlr" : "qcomm"), SCpnt->serial_number, k); HD(j)->cp_stat[k] = ABORTING; continue; } outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM); outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR); HD(j)->cp_stat[k] = IN_USE; } } static irqreturn_t ihdlr(unsigned int j) { struct scsi_cmnd *SCpnt; unsigned int i, k, c, status, tstatus, reg, ret; struct mscp *spp, *cpp; int irq = sh[j]->irq; /* Check if this board need to be serviced */ if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none; HD(j)->iocount++; if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq, HD(j)->iocount); /* Check if this board is still busy */ if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) { outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n", BN(j), irq, reg, HD(j)->iocount); goto none; } ret = inl(sh[j]->io_port + REG_ICM); /* Clear interrupt pending flag */ outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR); /* Find the mailbox to be serviced on this board */ for (i = 0; i < sh[j]->can_queue; i++) if (H2DEV(HD(j)->cp[i].cp_dma_addr) == ret) break; if (i >= sh[j]->can_queue) panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j), (void *)ret, (void *)H2DEV(HD(j)->cp[0].cp_dma_addr)); cpp = &(HD(j)->cp[i]); spp = cpp; #if defined(DEBUG_GENERATE_ABORTS) if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) goto handled; #endif if (HD(j)->cp_stat[i] == IGNORE) { HD(j)->cp_stat[i] = FREE; goto handled; } else if (HD(j)->cp_stat[i] == LOCKED) { HD(j)->cp_stat[i] = FREE; printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i, HD(j)->iocount); goto handled; } else if (HD(j)->cp_stat[i] == FREE) { printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i, HD(j)->iocount); goto handled; } else if (HD(j)->cp_stat[i] == IN_RESET) printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i); else if (HD(j)->cp_stat[i] != IN_USE) panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n", 
BN(j), i, HD(j)->cp_stat[i]); HD(j)->cp_stat[i] = FREE; SCpnt = cpp->SCpnt; if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i); if (SCpnt->host_scribble == NULL) panic("%s: ihdlr, mbox %d, pid %ld, SCpnt %p garbled.\n", BN(j), i, SCpnt->serial_number, SCpnt); if (*(unsigned int *)SCpnt->host_scribble != i) panic("%s: ihdlr, mbox %d, pid %ld, index mismatch %d.\n", BN(j), i, SCpnt->serial_number, *(unsigned int *)SCpnt->host_scribble); sync_dma(i, j); if (linked_comm && SCpnt->device->queue_depth > 2 && TLDEV(SCpnt->device->type)) flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE); tstatus = status_byte(spp->target_status); #if defined(DEBUG_GENERATE_ERRORS) if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2)) spp->adapter_status = 0x01; #endif switch (spp->adapter_status) { case ASOK: /* status OK */ /* Forces a reset if a disk drive keeps returning BUSY */ if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE) status = DID_ERROR << 16; /* If there was a bus reset, redo operation on each target */ else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK && HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)]) status = DID_BUS_BUSY << 16; /* Works around a flaw in scsi.c */ else if (tstatus == CHECK_CONDITION && SCpnt->device->type == TYPE_DISK && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR) status = DID_BUS_BUSY << 16; else status = DID_OK << 16; if (tstatus == GOOD) HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)] = FALSE; if (spp->target_status && SCpnt->device->type == TYPE_DISK && (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 && (SCpnt->sense_buffer[2] & 0xf) == NOT_READY))) scmd_printk(KERN_INFO, SCpnt, "ihdlr, pid %ld, target_status 0x%x, sense key 0x%x.\n", SCpnt->serial_number, spp->target_status, SCpnt->sense_buffer[2]); HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0; if (HD(j)->last_retried_pid == SCpnt->serial_number) HD(j)->retries = 0; break; case ASST: /* Selection 
Time Out */ if (HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] > 1) status = DID_ERROR << 16; else { status = DID_TIME_OUT << 16; HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)]++; } break; /* Perform a limited number of internal retries */ case 0x93: /* Unexpected bus free */ case 0x94: /* Target bus phase sequence failure */ case 0x96: /* Illegal SCSI command */ case 0xa3: /* SCSI bus reset error */ for (c = 0; c <= sh[j]->max_channel; c++) for (k = 0; k < sh[j]->max_id; k++) HD(j)->target_redo[k][c] = TRUE; case 0x92: /* Data over/under-run */ if (SCpnt->device->type != TYPE_TAPE && HD(j)->retries < MAX_INTERNAL_RETRIES) { #if defined(DID_SOFT_ERROR) status = DID_SOFT_ERROR << 16; #else status = DID_BUS_BUSY << 16; #endif HD(j)->retries++; HD(j)->last_retried_pid = SCpnt->serial_number; } else status = DID_ERROR << 16; break; case 0x01: /* Invalid command */ case 0x02: /* Invalid parameters */ case 0x03: /* Invalid data list */ case 0x84: /* SCSI bus abort error */ case 0x9b: /* Auto request sense error */ case 0x9f: /* Unexpected command complete message error */ case 0xff: /* Invalid parameter in the S/G list */ default: status = DID_ERROR << 16; break; } SCpnt->result = status | spp->target_status; #if defined(DEBUG_INTERRUPT) if (SCpnt->result || do_trace) #else if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) || (spp->adapter_status != ASOK && spp->adapter_status != ASST && HD(j)->iocount <= 1000) || do_trace || msg_byte(spp->target_status)) #endif scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\ " pid %ld, reg 0x%x, count %d.\n", i, spp->adapter_status, spp->target_status, SCpnt->serial_number, reg, HD(j)->iocount); unmap_dma(i, j); /* Set the command state to inactive */ SCpnt->host_scribble = NULL; SCpnt->scsi_done(SCpnt); if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq, HD(j)->iocount); handled: return IRQ_HANDLED; none: return IRQ_NONE; } static irqreturn_t do_interrupt_handler(int irq, void 
*shap) { unsigned int j; unsigned long spin_flags; irqreturn_t ret; /* Check if the interrupt must be processed by this handler */ if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE; spin_lock_irqsave(sh[j]->host_lock, spin_flags); ret = ihdlr(j); spin_unlock_irqrestore(sh[j]->host_lock, spin_flags); return ret; } static int u14_34f_release(struct Scsi_Host *shpnt) { unsigned int i, j; for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++); if (sh[j] == NULL) panic("%s: release, invalid Scsi_Host pointer.\n", driver_name); for (i = 0; i < sh[j]->can_queue; i++) kfree((&HD(j)->cp[i])->sglist); for (i = 0; i < sh[j]->can_queue; i++) pci_unmap_single(HD(j)->pdev, HD(j)->cp[i].cp_dma_addr, sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL); free_irq(sh[j]->irq, &sha[j]); if (sh[j]->dma_channel != NO_DMA) free_dma(sh[j]->dma_channel); release_region(sh[j]->io_port, sh[j]->n_io_port); scsi_unregister(sh[j]); return FALSE; } #include "scsi_module.c" #ifndef MODULE __setup("u14-34f=", option_setup); #endif /* end MODULE */
gpl-2.0
KyleAMathews/fastsocket
kernel/drivers/net/wireless/iwlwifi/dvm/power.c
2176
12719
/****************************************************************************** * * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <net/mac80211.h> #include "iwl-io.h" #include "iwl-debug.h" #include "iwl-trans.h" #include "iwl-modparams.h" #include "dev.h" #include "agn.h" #include "commands.h" #include "power.h" /* * Setting power level allows the card to go to sleep when not busy. * * We calculate a sleep command based on the required latency, which * we get from mac80211. In order to handle thermal throttling, we can * also use pre-defined power levels. */ /* * This defines the old power levels. 
They are still used by default * (level 1) and for thermal throttle (levels 3 through 5) */ struct iwl_power_vec_entry { struct iwl_powertable_cmd cmd; u8 no_dtim; /* number of skip dtim */ }; #define IWL_DTIM_RANGE_0_MAX 2 #define IWL_DTIM_RANGE_1_MAX 10 #define NOSLP cpu_to_le16(0), 0, 0 #define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0 #define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK | \ IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \ IWL_POWER_ADVANCE_PM_ENA_MSK) #define ASLP_TOUT(T) cpu_to_le32(T) #define TU_TO_USEC 1024 #define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC) #define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \ cpu_to_le32(X1), \ cpu_to_le32(X2), \ cpu_to_le32(X3), \ cpu_to_le32(X4)} /* default power management (not Tx power) table values */ /* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */ /* DTIM 0 - 2 */ static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = { {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0}, {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0}, {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1}, {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2} }; /* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ /* DTIM 3 - 10 */ static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = { {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0}, {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1}, {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2} }; /* for DTIM period > IWL_DTIM_RANGE_1_MAX */ /* DTIM 11 - */ static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = { {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, {{SLP, 
SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} }; /* advance power management */ /* DTIM 0 - 2 */ static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = { {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2} }; /* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ /* DTIM 3 - 10 */ static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = { {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2} }; /* for DTIM period > IWL_DTIM_RANGE_1_MAX */ /* DTIM 11 - */ static const struct iwl_power_vec_entry apm_range_2[IWL_POWER_NUM] = { {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0}, {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50), SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2} }; static void iwl_static_sleep_cmd(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, enum iwl_power_level lvl, int period) { const struct iwl_power_vec_entry *table; 
int max_sleep[IWL_POWER_VEC_SIZE] = { 0 }; int i; u8 skip; u32 slp_itrvl; if (priv->cfg->adv_pm) { table = apm_range_2; if (period <= IWL_DTIM_RANGE_1_MAX) table = apm_range_1; if (period <= IWL_DTIM_RANGE_0_MAX) table = apm_range_0; } else { table = range_2; if (period <= IWL_DTIM_RANGE_1_MAX) table = range_1; if (period <= IWL_DTIM_RANGE_0_MAX) table = range_0; } if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM)) memset(cmd, 0, sizeof(*cmd)); else *cmd = table[lvl].cmd; if (period == 0) { skip = 0; period = 1; for (i = 0; i < IWL_POWER_VEC_SIZE; i++) max_sleep[i] = 1; } else { skip = table[lvl].no_dtim; for (i = 0; i < IWL_POWER_VEC_SIZE; i++) max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]); max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1; } slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); /* figure out the listen interval based on dtim period and skip */ if (slp_itrvl == 0xFF) cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = cpu_to_le32(period * (skip + 1)); slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); if (slp_itrvl > period) cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = cpu_to_le32((slp_itrvl / period) * period); if (skip) cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; else cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; if (priv->cfg->base_params->shadow_reg_enable) cmd->flags |= IWL_POWER_SHADOW_REG_ENA; else cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; if (iwl_advanced_bt_coexist(priv)) { if (!priv->cfg->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else cmd->flags &= ~IWL_POWER_BT_SCO_ENA; } slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL) cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL); /* enforce max sleep interval */ for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) { if (le32_to_cpu(cmd->sleep_interval[i]) > (max_sleep[i] * period)) cmd->sleep_interval[i] = cpu_to_le32(max_sleep[i] * period); if (i != 
(IWL_POWER_VEC_SIZE - 1)) { if (le32_to_cpu(cmd->sleep_interval[i]) > le32_to_cpu(cmd->sleep_interval[i+1])) cmd->sleep_interval[i] = cmd->sleep_interval[i+1]; } } if (priv->power_data.bus_pm) cmd->flags |= IWL_POWER_PCI_PM_MSK; else cmd->flags &= ~IWL_POWER_PCI_PM_MSK; IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n", skip, period); /* The power level here is 0-4 (used as array index), but user expects to see 1-5 (according to spec). */ IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1); } static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) { memset(cmd, 0, sizeof(*cmd)); if (priv->power_data.bus_pm) cmd->flags |= IWL_POWER_PCI_PM_MSK; IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); } static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) { IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags); IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n", le32_to_cpu(cmd->sleep_interval[0]), le32_to_cpu(cmd->sleep_interval[1]), le32_to_cpu(cmd->sleep_interval[2]), le32_to_cpu(cmd->sleep_interval[3]), le32_to_cpu(cmd->sleep_interval[4])); return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC, sizeof(struct iwl_powertable_cmd), cmd); } static void iwl_power_build_cmd(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) { bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS; int dtimper; dtimper = priv->hw->conf.ps_dtim_period ?: 1; if (priv->wowlan) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); else if (!priv->cfg->base_params->no_idle_support && priv->hw->conf.flags & IEEE80211_CONF_IDLE) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); else if (iwl_tt_is_low_power_state(priv)) { /* in thermal throttling low power 
state */ iwl_static_sleep_cmd(priv, cmd, iwl_tt_current_power_mode(priv), dtimper); } else if (!enabled) iwl_power_sleep_cam_cmd(priv, cmd); else if (priv->power_data.debug_sleep_level_override >= 0) iwl_static_sleep_cmd(priv, cmd, priv->power_data.debug_sleep_level_override, dtimper); else { /* Note that the user parameter is 1-5 (according to spec), but we pass 0-4 because it acts as an array index. */ if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 && iwlwifi_mod_params.power_level <= IWL_POWER_NUM) iwl_static_sleep_cmd(priv, cmd, iwlwifi_mod_params.power_level - 1, dtimper); else iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_1, dtimper); } } int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, bool force) { int ret; bool update_chains; lockdep_assert_held(&priv->mutex); /* Don't update the RX chain when chain noise calibration is running */ update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) return 0; if (!iwl_is_ready_rf(priv)) return -EIO; /* scan complete use sleep_power_next, need to be updated */ memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); if (test_bit(STATUS_SCANNING, &priv->status) && !force) { IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n"); return 0; } if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) iwl_dvm_set_pmi(priv, true); ret = iwl_set_power(priv, cmd); if (!ret) { if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) iwl_dvm_set_pmi(priv, false); if (update_chains) iwl_update_chain_flags(priv); else IWL_DEBUG_POWER(priv, "Cannot update the power, chain noise " "calibration running: %d\n", priv->chain_noise_data.state); memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)); } else IWL_ERR(priv, "set power fail, ret = %d", ret); return ret; } int iwl_power_update_mode(struct iwl_priv *priv, bool force) { struct iwl_powertable_cmd cmd; 
iwl_power_build_cmd(priv, &cmd); return iwl_power_set_mode(priv, &cmd, force); } /* initialize to default */ void iwl_power_initialize(struct iwl_priv *priv) { priv->power_data.bus_pm = priv->trans->pm_support; priv->power_data.debug_sleep_level_override = -1; memset(&priv->power_data.sleep_cmd, 0, sizeof(priv->power_data.sleep_cmd)); }
gpl-2.0
nspierbundel/test
drivers/media/radio/radio-terratec.c
2688
10136
/* Terratec ActiveRadio ISA Standalone card driver for Linux radio support * (c) 1999 R. Offermanns (rolf@offermanns.de) * based on the aimslab radio driver from M. Kirkwood * many thanks to Michael Becker and Friedhelm Birth (from TerraTec) * * * History: * 1999-05-21 First preview release * * Notes on the hardware: * There are two "main" chips on the card: * - Philips OM5610 (http://www-us.semiconductors.philips.com/acrobat/datasheets/OM5610_2.pdf) * - Philips SAA6588 (http://www-us.semiconductors.philips.com/acrobat/datasheets/SAA6588_1.pdf) * (you can get the datasheet at the above links) * * Frequency control is done digitally -- ie out(port,encodefreq(95.8)); * Volume Control is done digitally * * there is a I2C controlled RDS decoder (SAA6588) onboard, which i would like to support someday * (as soon i have understand how to get started :) * If you can help me out with that, please contact me!! * * * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> */ #include <linux/module.h> /* Modules */ #include <linux/init.h> /* Initdata */ #include <linux/ioport.h> /* request_region */ #include <linux/videodev2.h> /* kernel radio structs */ #include <linux/mutex.h> #include <linux/version.h> /* for KERNEL_VERSION MACRO */ #include <linux/io.h> /* outb, outb_p */ #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> MODULE_AUTHOR("R.OFFERMANNS & others"); MODULE_DESCRIPTION("A driver for the TerraTec ActiveRadio Standalone radio card."); MODULE_LICENSE("GPL"); #ifndef CONFIG_RADIO_TERRATEC_PORT #define CONFIG_RADIO_TERRATEC_PORT 0x590 #endif static int io = CONFIG_RADIO_TERRATEC_PORT; static int radio_nr = -1; module_param(io, int, 0); MODULE_PARM_DESC(io, "I/O address of the TerraTec ActiveRadio card (0x590 or 0x591)"); module_param(radio_nr, int, 0); #define RADIO_VERSION KERNEL_VERSION(0, 0, 2) static struct v4l2_queryctrl radio_qctrl[] = { { .id = V4L2_CID_AUDIO_MUTE, .name = "Mute", .minimum = 0, .maximum = 1, .default_value = 1, .type 
= V4L2_CTRL_TYPE_BOOLEAN, },{ .id = V4L2_CID_AUDIO_VOLUME, .name = "Volume", .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0xff, .type = V4L2_CTRL_TYPE_INTEGER, } }; #define WRT_DIS 0x00 #define CLK_OFF 0x00 #define IIC_DATA 0x01 #define IIC_CLK 0x02 #define DATA 0x04 #define CLK_ON 0x08 #define WRT_EN 0x10 struct terratec { struct v4l2_device v4l2_dev; struct video_device vdev; int io; int curvol; unsigned long curfreq; int muted; struct mutex lock; }; static struct terratec terratec_card; /* local things */ static void tt_write_vol(struct terratec *tt, int volume) { int i; volume = volume + (volume * 32); /* change both channels */ mutex_lock(&tt->lock); for (i = 0; i < 8; i++) { if (volume & (0x80 >> i)) outb(0x80, tt->io + 1); else outb(0x00, tt->io + 1); } mutex_unlock(&tt->lock); } static void tt_mute(struct terratec *tt) { tt->muted = 1; tt_write_vol(tt, 0); } static int tt_setvol(struct terratec *tt, int vol) { if (vol == tt->curvol) { /* requested volume = current */ if (tt->muted) { /* user is unmuting the card */ tt->muted = 0; tt_write_vol(tt, vol); /* enable card */ } return 0; } if (vol == 0) { /* volume = 0 means mute the card */ tt_write_vol(tt, 0); /* "turn off card" by setting vol to 0 */ tt->curvol = vol; /* track the volume state! */ return 0; } tt->muted = 0; tt_write_vol(tt, vol); tt->curvol = vol; return 0; } /* this is the worst part in this driver */ /* many more or less strange things are going on here, but hey, it works :) */ static int tt_setfreq(struct terratec *tt, unsigned long freq1) { int freq; int i; int p; int temp; long rest; unsigned char buffer[25]; /* we have to bit shift 25 registers */ mutex_lock(&tt->lock); tt->curfreq = freq1; freq = freq1 / 160; /* convert the freq. to a nice to handle value */ memset(buffer, 0, sizeof(buffer)); rest = freq * 10 + 10700; /* I once had understood what is going on here */ /* maybe some wise guy (friedhelm?) 
can comment this stuff */ i = 13; p = 10; temp = 102400; while (rest != 0) { if (rest % temp == rest) buffer[i] = 0; else { buffer[i] = 1; rest = rest - temp; } i--; p--; temp = temp / 2; } for (i = 24; i > -1; i--) { /* bit shift the values to the radiocard */ if (buffer[i] == 1) { outb(WRT_EN | DATA, tt->io); outb(WRT_EN | DATA | CLK_ON, tt->io); outb(WRT_EN | DATA, tt->io); } else { outb(WRT_EN | 0x00, tt->io); outb(WRT_EN | 0x00 | CLK_ON, tt->io); } } outb(0x00, tt->io); mutex_unlock(&tt->lock); return 0; } static int tt_getsigstr(struct terratec *tt) { if (inb(tt->io) & 2) /* bit set = no signal present */ return 0; return 1; /* signal present */ } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *v) { strlcpy(v->driver, "radio-terratec", sizeof(v->driver)); strlcpy(v->card, "ActiveRadio", sizeof(v->card)); strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); v->version = RADIO_VERSION; v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; return 0; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { struct terratec *tt = video_drvdata(file); if (v->index > 0) return -EINVAL; strlcpy(v->name, "FM", sizeof(v->name)); v->type = V4L2_TUNER_RADIO; v->rangelow = 87 * 16000; v->rangehigh = 108 * 16000; v->rxsubchans = V4L2_TUNER_SUB_MONO; v->capability = V4L2_TUNER_CAP_LOW; v->audmode = V4L2_TUNER_MODE_MONO; v->signal = 0xFFFF * tt_getsigstr(tt); return 0; } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { return v->index ? 
-EINVAL : 0; } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct terratec *tt = video_drvdata(file); if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) return -EINVAL; tt_setfreq(tt, f->frequency); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct terratec *tt = video_drvdata(file); if (f->tuner != 0) return -EINVAL; f->type = V4L2_TUNER_RADIO; f->frequency = tt->curfreq; return 0; } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { int i; for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { if (qc->id && qc->id == radio_qctrl[i].id) { memcpy(qc, &(radio_qctrl[i]), sizeof(*qc)); return 0; } } return -EINVAL; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct terratec *tt = video_drvdata(file); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: if (tt->muted) ctrl->value = 1; else ctrl->value = 0; return 0; case V4L2_CID_AUDIO_VOLUME: ctrl->value = tt->curvol * 6554; return 0; } return -EINVAL; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct terratec *tt = video_drvdata(file); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: if (ctrl->value) tt_mute(tt); else tt_setvol(tt,tt->curvol); return 0; case V4L2_CID_AUDIO_VOLUME: tt_setvol(tt,ctrl->value); return 0; } return -EINVAL; } static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i) { *i = 0; return 0; } static int vidioc_s_input(struct file *filp, void *priv, unsigned int i) { return i ? -EINVAL : 0; } static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { a->index = 0; strlcpy(a->name, "Radio", sizeof(a->name)); a->capability = V4L2_AUDCAP_STEREO; return 0; } static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *a) { return a->index ? 
-EINVAL : 0; } static const struct v4l2_file_operations terratec_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops terratec_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, }; static int __init terratec_init(void) { struct terratec *tt = &terratec_card; struct v4l2_device *v4l2_dev = &tt->v4l2_dev; int res; strlcpy(v4l2_dev->name, "terratec", sizeof(v4l2_dev->name)); tt->io = io; if (tt->io == -1) { v4l2_err(v4l2_dev, "you must set an I/O address with io=0x590 or 0x591\n"); return -EINVAL; } if (!request_region(tt->io, 2, "terratec")) { v4l2_err(v4l2_dev, "port 0x%x already in use\n", io); return -EBUSY; } res = v4l2_device_register(NULL, v4l2_dev); if (res < 0) { release_region(tt->io, 2); v4l2_err(v4l2_dev, "Could not register v4l2_device\n"); return res; } strlcpy(tt->vdev.name, v4l2_dev->name, sizeof(tt->vdev.name)); tt->vdev.v4l2_dev = v4l2_dev; tt->vdev.fops = &terratec_fops; tt->vdev.ioctl_ops = &terratec_ioctl_ops; tt->vdev.release = video_device_release_empty; video_set_drvdata(&tt->vdev, tt); mutex_init(&tt->lock); /* mute card - prevents noisy bootups */ tt_write_vol(tt, 0); if (video_register_device(&tt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { v4l2_device_unregister(&tt->v4l2_dev); release_region(tt->io, 2); return -EINVAL; } v4l2_info(v4l2_dev, "TERRATEC ActivRadio Standalone card driver.\n"); return 0; } static void __exit terratec_exit(void) { struct terratec *tt = &terratec_card; struct v4l2_device *v4l2_dev = &tt->v4l2_dev; video_unregister_device(&tt->vdev); v4l2_device_unregister(&tt->v4l2_dev); 
release_region(tt->io, 2); v4l2_info(v4l2_dev, "TERRATEC ActivRadio Standalone card driver unloaded.\n"); } module_init(terratec_init); module_exit(terratec_exit);
gpl-2.0
SlimRoms/kernel_samsung_t1
net/xfrm/xfrm_state.c
2944
54809
/* * xfrm_state.c * * Changes: * Mitsuru KANDA @USAGI * Kazunori MIYAZAWA @USAGI * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * IPv6 support * YOSHIFUJI Hideaki @USAGI * Split up af-specific functions * Derek Atkins <derek@ihtfp.com> * Add UDP Encapsulation * */ #include <linux/workqueue.h> #include <net/xfrm.h> #include <linux/pfkeyv2.h> #include <linux/ipsec.h> #include <linux/module.h> #include <linux/cache.h> #include <linux/audit.h> #include <asm/uaccess.h> #include <linux/ktime.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include "xfrm_hash.h" /* Each xfrm_state may be linked to two tables: 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl) 2. Hash table by (daddr,family,reqid) to find what SAs exist for given destination/tunnel endpoint. (output) */ static DEFINE_SPINLOCK(xfrm_state_lock); static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); static inline unsigned int xfrm_dst_hash(struct net *net, const xfrm_address_t *daddr, const xfrm_address_t *saddr, u32 reqid, unsigned short family) { return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask); } static inline unsigned int xfrm_src_hash(struct net *net, const xfrm_address_t *daddr, const xfrm_address_t *saddr, unsigned short family) { return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask); } static inline unsigned int xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family) { return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask); } static void xfrm_hash_transfer(struct hlist_head *list, struct hlist_head *ndsttable, struct hlist_head *nsrctable, struct hlist_head *nspitable, unsigned int nhashmask) { struct hlist_node *entry, *tmp; struct xfrm_state *x; hlist_for_each_entry_safe(x, entry, tmp, 
list, bydst) { unsigned int h; h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr, x->props.reqid, x->props.family, nhashmask); hlist_add_head(&x->bydst, ndsttable+h); h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family, nhashmask); hlist_add_head(&x->bysrc, nsrctable+h); if (x->id.spi) { h = __xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family, nhashmask); hlist_add_head(&x->byspi, nspitable+h); } } } static unsigned long xfrm_hash_new_size(unsigned int state_hmask) { return ((state_hmask + 1) << 1) * sizeof(struct hlist_head); } static DEFINE_MUTEX(hash_resize_mutex); static void xfrm_hash_resize(struct work_struct *work) { struct net *net = container_of(work, struct net, xfrm.state_hash_work); struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; unsigned long nsize, osize; unsigned int nhashmask, ohashmask; int i; mutex_lock(&hash_resize_mutex); nsize = xfrm_hash_new_size(net->xfrm.state_hmask); ndst = xfrm_hash_alloc(nsize); if (!ndst) goto out_unlock; nsrc = xfrm_hash_alloc(nsize); if (!nsrc) { xfrm_hash_free(ndst, nsize); goto out_unlock; } nspi = xfrm_hash_alloc(nsize); if (!nspi) { xfrm_hash_free(ndst, nsize); xfrm_hash_free(nsrc, nsize); goto out_unlock; } spin_lock_bh(&xfrm_state_lock); nhashmask = (nsize / sizeof(struct hlist_head)) - 1U; for (i = net->xfrm.state_hmask; i >= 0; i--) xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi, nhashmask); odst = net->xfrm.state_bydst; osrc = net->xfrm.state_bysrc; ospi = net->xfrm.state_byspi; ohashmask = net->xfrm.state_hmask; net->xfrm.state_bydst = ndst; net->xfrm.state_bysrc = nsrc; net->xfrm.state_byspi = nspi; net->xfrm.state_hmask = nhashmask; spin_unlock_bh(&xfrm_state_lock); osize = (ohashmask + 1) * sizeof(struct hlist_head); xfrm_hash_free(odst, osize); xfrm_hash_free(osrc, osize); xfrm_hash_free(ospi, osize); out_unlock: mutex_unlock(&hash_resize_mutex); } static DEFINE_RWLOCK(xfrm_state_afinfo_lock); static struct xfrm_state_afinfo 
*xfrm_state_afinfo[NPROTO]; static DEFINE_SPINLOCK(xfrm_state_gc_lock); int __xfrm_state_delete(struct xfrm_state *x); int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); void km_state_expired(struct xfrm_state *x, int hard, u32 pid); static struct xfrm_state_afinfo *xfrm_state_lock_afinfo(unsigned int family) { struct xfrm_state_afinfo *afinfo; if (unlikely(family >= NPROTO)) return NULL; write_lock_bh(&xfrm_state_afinfo_lock); afinfo = xfrm_state_afinfo[family]; if (unlikely(!afinfo)) write_unlock_bh(&xfrm_state_afinfo_lock); return afinfo; } static void xfrm_state_unlock_afinfo(struct xfrm_state_afinfo *afinfo) __releases(xfrm_state_afinfo_lock) { write_unlock_bh(&xfrm_state_afinfo_lock); } int xfrm_register_type(const struct xfrm_type *type, unsigned short family) { struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family); const struct xfrm_type **typemap; int err = 0; if (unlikely(afinfo == NULL)) return -EAFNOSUPPORT; typemap = afinfo->type_map; if (likely(typemap[type->proto] == NULL)) typemap[type->proto] = type; else err = -EEXIST; xfrm_state_unlock_afinfo(afinfo); return err; } EXPORT_SYMBOL(xfrm_register_type); int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family) { struct xfrm_state_afinfo *afinfo = xfrm_state_lock_afinfo(family); const struct xfrm_type **typemap; int err = 0; if (unlikely(afinfo == NULL)) return -EAFNOSUPPORT; typemap = afinfo->type_map; if (unlikely(typemap[type->proto] != type)) err = -ENOENT; else typemap[type->proto] = NULL; xfrm_state_unlock_afinfo(afinfo); return err; } EXPORT_SYMBOL(xfrm_unregister_type); static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family) { struct xfrm_state_afinfo *afinfo; const struct xfrm_type **typemap; const struct xfrm_type *type; int modload_attempted = 0; retry: afinfo = xfrm_state_get_afinfo(family); if (unlikely(afinfo == NULL)) return NULL; typemap = afinfo->type_map; type = typemap[proto]; if (unlikely(type && 
!try_module_get(type->owner))) type = NULL; if (!type && !modload_attempted) { xfrm_state_put_afinfo(afinfo); request_module("xfrm-type-%d-%d", family, proto); modload_attempted = 1; goto retry; } xfrm_state_put_afinfo(afinfo); return type; } static void xfrm_put_type(const struct xfrm_type *type) { module_put(type->owner); } int xfrm_register_mode(struct xfrm_mode *mode, int family) { struct xfrm_state_afinfo *afinfo; struct xfrm_mode **modemap; int err; if (unlikely(mode->encap >= XFRM_MODE_MAX)) return -EINVAL; afinfo = xfrm_state_lock_afinfo(family); if (unlikely(afinfo == NULL)) return -EAFNOSUPPORT; err = -EEXIST; modemap = afinfo->mode_map; if (modemap[mode->encap]) goto out; err = -ENOENT; if (!try_module_get(afinfo->owner)) goto out; mode->afinfo = afinfo; modemap[mode->encap] = mode; err = 0; out: xfrm_state_unlock_afinfo(afinfo); return err; } EXPORT_SYMBOL(xfrm_register_mode); int xfrm_unregister_mode(struct xfrm_mode *mode, int family) { struct xfrm_state_afinfo *afinfo; struct xfrm_mode **modemap; int err; if (unlikely(mode->encap >= XFRM_MODE_MAX)) return -EINVAL; afinfo = xfrm_state_lock_afinfo(family); if (unlikely(afinfo == NULL)) return -EAFNOSUPPORT; err = -ENOENT; modemap = afinfo->mode_map; if (likely(modemap[mode->encap] == mode)) { modemap[mode->encap] = NULL; module_put(mode->afinfo->owner); err = 0; } xfrm_state_unlock_afinfo(afinfo); return err; } EXPORT_SYMBOL(xfrm_unregister_mode); static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family) { struct xfrm_state_afinfo *afinfo; struct xfrm_mode *mode; int modload_attempted = 0; if (unlikely(encap >= XFRM_MODE_MAX)) return NULL; retry: afinfo = xfrm_state_get_afinfo(family); if (unlikely(afinfo == NULL)) return NULL; mode = afinfo->mode_map[encap]; if (unlikely(mode && !try_module_get(mode->owner))) mode = NULL; if (!mode && !modload_attempted) { xfrm_state_put_afinfo(afinfo); request_module("xfrm-mode-%d-%d", family, encap); modload_attempted = 1; goto retry; } 
xfrm_state_put_afinfo(afinfo); return mode; } static void xfrm_put_mode(struct xfrm_mode *mode) { module_put(mode->owner); } static void xfrm_state_gc_destroy(struct xfrm_state *x) { tasklet_hrtimer_cancel(&x->mtimer); del_timer_sync(&x->rtimer); kfree(x->aalg); kfree(x->ealg); kfree(x->calg); kfree(x->encap); kfree(x->coaddr); kfree(x->replay_esn); kfree(x->preplay_esn); if (x->inner_mode) xfrm_put_mode(x->inner_mode); if (x->inner_mode_iaf) xfrm_put_mode(x->inner_mode_iaf); if (x->outer_mode) xfrm_put_mode(x->outer_mode); if (x->type) { x->type->destructor(x); xfrm_put_type(x->type); } security_xfrm_state_free(x); kfree(x); } static void xfrm_state_gc_task(struct work_struct *work) { struct net *net = container_of(work, struct net, xfrm.state_gc_work); struct xfrm_state *x; struct hlist_node *entry, *tmp; struct hlist_head gc_list; spin_lock_bh(&xfrm_state_gc_lock); hlist_move_list(&net->xfrm.state_gc_list, &gc_list); spin_unlock_bh(&xfrm_state_gc_lock); hlist_for_each_entry_safe(x, entry, tmp, &gc_list, gclist) xfrm_state_gc_destroy(x); wake_up(&net->xfrm.km_waitq); } static inline unsigned long make_jiffies(long secs) { if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ) return MAX_SCHEDULE_TIMEOUT-1; else return secs*HZ; } static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me) { struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer); struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer); struct net *net = xs_net(x); unsigned long now = get_seconds(); long next = LONG_MAX; int warn = 0; int err = 0; spin_lock(&x->lock); if (x->km.state == XFRM_STATE_DEAD) goto out; if (x->km.state == XFRM_STATE_EXPIRED) goto expired; if (x->lft.hard_add_expires_seconds) { long tmo = x->lft.hard_add_expires_seconds + x->curlft.add_time - now; if (tmo <= 0) goto expired; if (tmo < next) next = tmo; } if (x->lft.hard_use_expires_seconds) { long tmo = x->lft.hard_use_expires_seconds + (x->curlft.use_time ? 
: now) - now; if (tmo <= 0) goto expired; if (tmo < next) next = tmo; } if (x->km.dying) goto resched; if (x->lft.soft_add_expires_seconds) { long tmo = x->lft.soft_add_expires_seconds + x->curlft.add_time - now; if (tmo <= 0) warn = 1; else if (tmo < next) next = tmo; } if (x->lft.soft_use_expires_seconds) { long tmo = x->lft.soft_use_expires_seconds + (x->curlft.use_time ? : now) - now; if (tmo <= 0) warn = 1; else if (tmo < next) next = tmo; } x->km.dying = warn; if (warn) km_state_expired(x, 0, 0); resched: if (next != LONG_MAX){ tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL); } goto out; expired: if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) { x->km.state = XFRM_STATE_EXPIRED; wake_up(&net->xfrm.km_waitq); next = 2; goto resched; } err = __xfrm_state_delete(x); if (!err && x->id.spi) km_state_expired(x, 1, 0); xfrm_audit_state_delete(x, err ? 0 : 1, audit_get_loginuid(current), audit_get_sessionid(current), 0); out: spin_unlock(&x->lock); return HRTIMER_NORESTART; } static void xfrm_replay_timer_handler(unsigned long data); struct xfrm_state *xfrm_state_alloc(struct net *net) { struct xfrm_state *x; x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC); if (x) { write_pnet(&x->xs_net, net); atomic_set(&x->refcnt, 1); atomic_set(&x->tunnel_users, 0); INIT_LIST_HEAD(&x->km.all); INIT_HLIST_NODE(&x->bydst); INIT_HLIST_NODE(&x->bysrc); INIT_HLIST_NODE(&x->byspi); tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS); setup_timer(&x->rtimer, xfrm_replay_timer_handler, (unsigned long)x); x->curlft.add_time = get_seconds(); x->lft.soft_byte_limit = XFRM_INF; x->lft.soft_packet_limit = XFRM_INF; x->lft.hard_byte_limit = XFRM_INF; x->lft.hard_packet_limit = XFRM_INF; x->replay_maxage = 0; x->replay_maxdiff = 0; x->inner_mode = NULL; x->inner_mode_iaf = NULL; spin_lock_init(&x->lock); } return x; } EXPORT_SYMBOL(xfrm_state_alloc); void __xfrm_state_destroy(struct xfrm_state *x) { struct net *net = xs_net(x); 
WARN_ON(x->km.state != XFRM_STATE_DEAD); spin_lock_bh(&xfrm_state_gc_lock); hlist_add_head(&x->gclist, &net->xfrm.state_gc_list); spin_unlock_bh(&xfrm_state_gc_lock); schedule_work(&net->xfrm.state_gc_work); } EXPORT_SYMBOL(__xfrm_state_destroy); int __xfrm_state_delete(struct xfrm_state *x) { struct net *net = xs_net(x); int err = -ESRCH; if (x->km.state != XFRM_STATE_DEAD) { x->km.state = XFRM_STATE_DEAD; spin_lock(&xfrm_state_lock); list_del(&x->km.all); hlist_del(&x->bydst); hlist_del(&x->bysrc); if (x->id.spi) hlist_del(&x->byspi); net->xfrm.state_num--; spin_unlock(&xfrm_state_lock); /* All xfrm_state objects are created by xfrm_state_alloc. * The xfrm_state_alloc call gives a reference, and that * is what we are dropping here. */ xfrm_state_put(x); err = 0; } return err; } EXPORT_SYMBOL(__xfrm_state_delete); int xfrm_state_delete(struct xfrm_state *x) { int err; spin_lock_bh(&x->lock); err = __xfrm_state_delete(x); spin_unlock_bh(&x->lock); return err; } EXPORT_SYMBOL(xfrm_state_delete); #ifdef CONFIG_SECURITY_NETWORK_XFRM static inline int xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info) { int i, err = 0; for (i = 0; i <= net->xfrm.state_hmask; i++) { struct hlist_node *entry; struct xfrm_state *x; hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { if (xfrm_id_proto_match(x->id.proto, proto) && (err = security_xfrm_state_delete(x)) != 0) { xfrm_audit_state_delete(x, 0, audit_info->loginuid, audit_info->sessionid, audit_info->secid); return err; } } } return err; } #else static inline int xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audit_info) { return 0; } #endif int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info) { int i, err = 0, cnt = 0; spin_lock_bh(&xfrm_state_lock); err = xfrm_state_flush_secctx_check(net, proto, audit_info); if (err) goto out; err = -ESRCH; for (i = 0; i <= net->xfrm.state_hmask; i++) { struct hlist_node *entry; struct 
xfrm_state *x; restart: hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { if (!xfrm_state_kern(x) && xfrm_id_proto_match(x->id.proto, proto)) { xfrm_state_hold(x); spin_unlock_bh(&xfrm_state_lock); err = xfrm_state_delete(x); xfrm_audit_state_delete(x, err ? 0 : 1, audit_info->loginuid, audit_info->sessionid, audit_info->secid); xfrm_state_put(x); if (!err) cnt++; spin_lock_bh(&xfrm_state_lock); goto restart; } } } if (cnt) err = 0; out: spin_unlock_bh(&xfrm_state_lock); wake_up(&net->xfrm.km_waitq); return err; } EXPORT_SYMBOL(xfrm_state_flush); void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) { spin_lock_bh(&xfrm_state_lock); si->sadcnt = net->xfrm.state_num; si->sadhcnt = net->xfrm.state_hmask; si->sadhmcnt = xfrm_state_hashmax; spin_unlock_bh(&xfrm_state_lock); } EXPORT_SYMBOL(xfrm_sad_getinfo); static int xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl, const struct xfrm_tmpl *tmpl, const xfrm_address_t *daddr, const xfrm_address_t *saddr, unsigned short family) { struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); if (!afinfo) return -1; afinfo->init_tempsel(&x->sel, fl); if (family != tmpl->encap_family) { xfrm_state_put_afinfo(afinfo); afinfo = xfrm_state_get_afinfo(tmpl->encap_family); if (!afinfo) return -1; } afinfo->init_temprop(x, tmpl, daddr, saddr); xfrm_state_put_afinfo(afinfo); return 0; } static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family) { unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family); struct xfrm_state *x; struct hlist_node *entry; hlist_for_each_entry(x, entry, net->xfrm.state_byspi+h, byspi) { if (x->props.family != family || x->id.spi != spi || x->id.proto != proto || xfrm_addr_cmp(&x->id.daddr, daddr, family)) continue; if ((mark & x->mark.m) != x->mark.v) continue; xfrm_state_hold(x); return x; } return NULL; } static struct xfrm_state 
*__xfrm_state_lookup_byaddr(struct net *net, u32 mark, const xfrm_address_t *daddr, const xfrm_address_t *saddr, u8 proto, unsigned short family) { unsigned int h = xfrm_src_hash(net, daddr, saddr, family); struct xfrm_state *x; struct hlist_node *entry; hlist_for_each_entry(x, entry, net->xfrm.state_bysrc+h, bysrc) { if (x->props.family != family || x->id.proto != proto || xfrm_addr_cmp(&x->id.daddr, daddr, family) || xfrm_addr_cmp(&x->props.saddr, saddr, family)) continue; if ((mark & x->mark.m) != x->mark.v) continue; xfrm_state_hold(x); return x; } return NULL; } static inline struct xfrm_state * __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family) { struct net *net = xs_net(x); u32 mark = x->mark.v & x->mark.m; if (use_spi) return __xfrm_state_lookup(net, mark, &x->id.daddr, x->id.spi, x->id.proto, family); else return __xfrm_state_lookup_byaddr(net, mark, &x->id.daddr, &x->props.saddr, x->id.proto, family); } static void xfrm_hash_grow_check(struct net *net, int have_hash_collision) { if (have_hash_collision && (net->xfrm.state_hmask + 1) < xfrm_state_hashmax && net->xfrm.state_num > net->xfrm.state_hmask) schedule_work(&net->xfrm.state_hash_work); } static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, const struct flowi *fl, unsigned short family, struct xfrm_state **best, int *acq_in_progress, int *error) { /* Resolution logic: * 1. There is a valid state with matching selector. Done. * 2. Valid state with inappropriate selector. Skip. * * Entering area of "sysdeps". * * 3. If state is not valid, selector is temporary, it selects * only session which triggered previous resolution. Key * manager will do something to install a state with proper * selector. 
*/ if (x->km.state == XFRM_STATE_VALID) { if ((x->sel.family && !xfrm_selector_match(&x->sel, fl, x->sel.family)) || !security_xfrm_state_pol_flow_match(x, pol, fl)) return; if (!*best || (*best)->km.dying > x->km.dying || ((*best)->km.dying == x->km.dying && (*best)->curlft.add_time < x->curlft.add_time)) *best = x; } else if (x->km.state == XFRM_STATE_ACQ) { *acq_in_progress = 1; } else if (x->km.state == XFRM_STATE_ERROR || x->km.state == XFRM_STATE_EXPIRED) { if (xfrm_selector_match(&x->sel, fl, x->sel.family) && security_xfrm_state_pol_flow_match(x, pol, fl)) *error = -ESRCH; } } struct xfrm_state * xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, const struct flowi *fl, struct xfrm_tmpl *tmpl, struct xfrm_policy *pol, int *err, unsigned short family) { static xfrm_address_t saddr_wildcard = { }; struct net *net = xp_net(pol); unsigned int h, h_wildcard; struct hlist_node *entry; struct xfrm_state *x, *x0, *to_put; int acquire_in_progress = 0; int error = 0; struct xfrm_state *best = NULL; u32 mark = pol->mark.v & pol->mark.m; unsigned short encap_family = tmpl->encap_family; to_put = NULL; spin_lock_bh(&xfrm_state_lock); h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { if (x->props.family == encap_family && x->props.reqid == tmpl->reqid && (mark & x->mark.m) == x->mark.v && !(x->props.flags & XFRM_STATE_WILDRECV) && xfrm_state_addr_check(x, daddr, saddr, encap_family) && tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) xfrm_state_look_at(pol, x, fl, encap_family, &best, &acquire_in_progress, &error); } if (best) goto found; h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family); hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { if (x->props.family == encap_family && x->props.reqid == tmpl->reqid && (mark & x->mark.m) == x->mark.v && 
!(x->props.flags & XFRM_STATE_WILDRECV) && xfrm_state_addr_check(x, daddr, saddr, encap_family) && tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) xfrm_state_look_at(pol, x, fl, encap_family, &best, &acquire_in_progress, &error); } found: x = best; if (!x && !error && !acquire_in_progress) { if (tmpl->id.spi && (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi, tmpl->id.proto, encap_family)) != NULL) { to_put = x0; error = -EEXIST; goto out; } x = xfrm_state_alloc(net); if (x == NULL) { error = -ENOMEM; goto out; } /* Initialize temporary state matching only * to current session. */ xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family); memcpy(&x->mark, &pol->mark, sizeof(x->mark)); error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid); if (error) { x->km.state = XFRM_STATE_DEAD; to_put = x; x = NULL; goto out; } if (km_query(x, tmpl, pol) == 0) { x->km.state = XFRM_STATE_ACQ; list_add(&x->km.all, &net->xfrm.state_all); hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); h = xfrm_src_hash(net, daddr, saddr, encap_family); hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); if (x->id.spi) { h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family); hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); } x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); net->xfrm.state_num++; xfrm_hash_grow_check(net, x->bydst.next != NULL); } else { x->km.state = XFRM_STATE_DEAD; to_put = x; x = NULL; error = -ESRCH; } } out: if (x) xfrm_state_hold(x); else *err = acquire_in_progress ? 
-EAGAIN : error; spin_unlock_bh(&xfrm_state_lock); if (to_put) xfrm_state_put(to_put); return x; } struct xfrm_state * xfrm_stateonly_find(struct net *net, u32 mark, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, u8 mode, u8 proto, u32 reqid) { unsigned int h; struct xfrm_state *rx = NULL, *x = NULL; struct hlist_node *entry; spin_lock(&xfrm_state_lock); h = xfrm_dst_hash(net, daddr, saddr, reqid, family); hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { if (x->props.family == family && x->props.reqid == reqid && (mark & x->mark.m) == x->mark.v && !(x->props.flags & XFRM_STATE_WILDRECV) && xfrm_state_addr_check(x, daddr, saddr, family) && mode == x->props.mode && proto == x->id.proto && x->km.state == XFRM_STATE_VALID) { rx = x; break; } } if (rx) xfrm_state_hold(rx); spin_unlock(&xfrm_state_lock); return rx; } EXPORT_SYMBOL(xfrm_stateonly_find); static void __xfrm_state_insert(struct xfrm_state *x) { struct net *net = xs_net(x); unsigned int h; list_add(&x->km.all, &net->xfrm.state_all); h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, x->props.reqid, x->props.family); hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family); hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); if (x->id.spi) { h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); } tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL); if (x->replay_maxage) mod_timer(&x->rtimer, jiffies + x->replay_maxage); wake_up(&net->xfrm.km_waitq); net->xfrm.state_num++; xfrm_hash_grow_check(net, x->bydst.next != NULL); } /* xfrm_state_lock is held */ static void __xfrm_state_bump_genids(struct xfrm_state *xnew) { struct net *net = xs_net(xnew); unsigned short family = xnew->props.family; u32 reqid = xnew->props.reqid; struct xfrm_state *x; struct hlist_node *entry; unsigned int h; u32 mark = xnew->mark.v & 
xnew->mark.m; h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { if (x->props.family == family && x->props.reqid == reqid && (mark & x->mark.m) == x->mark.v && !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) x->genid++; } } void xfrm_state_insert(struct xfrm_state *x) { spin_lock_bh(&xfrm_state_lock); __xfrm_state_bump_genids(x); __xfrm_state_insert(x); spin_unlock_bh(&xfrm_state_lock); } EXPORT_SYMBOL(xfrm_state_insert); /* xfrm_state_lock is held */ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, unsigned short family, u8 mode, u32 reqid, u8 proto, const xfrm_address_t *daddr, const xfrm_address_t *saddr, int create) { unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); struct hlist_node *entry; struct xfrm_state *x; u32 mark = m->v & m->m; hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { if (x->props.reqid != reqid || x->props.mode != mode || x->props.family != family || x->km.state != XFRM_STATE_ACQ || x->id.spi != 0 || x->id.proto != proto || (mark & x->mark.m) != x->mark.v || xfrm_addr_cmp(&x->id.daddr, daddr, family) || xfrm_addr_cmp(&x->props.saddr, saddr, family)) continue; xfrm_state_hold(x); return x; } if (!create) return NULL; x = xfrm_state_alloc(net); if (likely(x)) { switch (family) { case AF_INET: x->sel.daddr.a4 = daddr->a4; x->sel.saddr.a4 = saddr->a4; x->sel.prefixlen_d = 32; x->sel.prefixlen_s = 32; x->props.saddr.a4 = saddr->a4; x->id.daddr.a4 = daddr->a4; break; case AF_INET6: ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6, (const struct in6_addr *)daddr); ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6, (const struct in6_addr *)saddr); x->sel.prefixlen_d = 128; x->sel.prefixlen_s = 128; ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6, (const struct in6_addr *)saddr); ipv6_addr_copy((struct in6_addr 
*)x->id.daddr.a6, (const struct in6_addr *)daddr); break; } x->km.state = XFRM_STATE_ACQ; x->id.proto = proto; x->props.family = family; x->props.mode = mode; x->props.reqid = reqid; x->mark.v = m->v; x->mark.m = m->m; x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; xfrm_state_hold(x); tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); list_add(&x->km.all, &net->xfrm.state_all); hlist_add_head(&x->bydst, net->xfrm.state_bydst+h); h = xfrm_src_hash(net, daddr, saddr, family); hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h); net->xfrm.state_num++; xfrm_hash_grow_check(net, x->bydst.next != NULL); } return x; } static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); int xfrm_state_add(struct xfrm_state *x) { struct net *net = xs_net(x); struct xfrm_state *x1, *to_put; int family; int err; u32 mark = x->mark.v & x->mark.m; int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); family = x->props.family; to_put = NULL; spin_lock_bh(&xfrm_state_lock); x1 = __xfrm_state_locate(x, use_spi, family); if (x1) { to_put = x1; x1 = NULL; err = -EEXIST; goto out; } if (use_spi && x->km.seq) { x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq); if (x1 && ((x1->id.proto != x->id.proto) || xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) { to_put = x1; x1 = NULL; } } if (use_spi && !x1) x1 = __find_acq_core(net, &x->mark, family, x->props.mode, x->props.reqid, x->id.proto, &x->id.daddr, &x->props.saddr, 0); __xfrm_state_bump_genids(x); __xfrm_state_insert(x); err = 0; out: spin_unlock_bh(&xfrm_state_lock); if (x1) { xfrm_state_delete(x1); xfrm_state_put(x1); } if (to_put) xfrm_state_put(to_put); return err; } EXPORT_SYMBOL(xfrm_state_add); #ifdef CONFIG_XFRM_MIGRATE static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp) { struct net *net = xs_net(orig); int err = -ENOMEM; struct xfrm_state *x = xfrm_state_alloc(net); if (!x) goto out; memcpy(&x->id, 
&orig->id, sizeof(x->id)); memcpy(&x->sel, &orig->sel, sizeof(x->sel)); memcpy(&x->lft, &orig->lft, sizeof(x->lft)); x->props.mode = orig->props.mode; x->props.replay_window = orig->props.replay_window; x->props.reqid = orig->props.reqid; x->props.family = orig->props.family; x->props.saddr = orig->props.saddr; if (orig->aalg) { x->aalg = xfrm_algo_auth_clone(orig->aalg); if (!x->aalg) goto error; } x->props.aalgo = orig->props.aalgo; if (orig->ealg) { x->ealg = xfrm_algo_clone(orig->ealg); if (!x->ealg) goto error; } x->props.ealgo = orig->props.ealgo; if (orig->calg) { x->calg = xfrm_algo_clone(orig->calg); if (!x->calg) goto error; } x->props.calgo = orig->props.calgo; if (orig->encap) { x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL); if (!x->encap) goto error; } if (orig->coaddr) { x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr), GFP_KERNEL); if (!x->coaddr) goto error; } if (orig->replay_esn) { err = xfrm_replay_clone(x, orig); if (err) goto error; } memcpy(&x->mark, &orig->mark, sizeof(x->mark)); err = xfrm_init_state(x); if (err) goto error; x->props.flags = orig->props.flags; x->curlft.add_time = orig->curlft.add_time; x->km.state = orig->km.state; x->km.seq = orig->km.seq; return x; error: xfrm_state_put(x); out: if (errp) *errp = err; return NULL; } /* xfrm_state_lock is held */ struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m) { unsigned int h; struct xfrm_state *x; struct hlist_node *entry; if (m->reqid) { h = xfrm_dst_hash(&init_net, &m->old_daddr, &m->old_saddr, m->reqid, m->old_family); hlist_for_each_entry(x, entry, init_net.xfrm.state_bydst+h, bydst) { if (x->props.mode != m->mode || x->id.proto != m->proto) continue; if (m->reqid && x->props.reqid != m->reqid) continue; if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr, m->old_family) || xfrm_addr_cmp(&x->props.saddr, &m->old_saddr, m->old_family)) continue; xfrm_state_hold(x); return x; } } else { h = xfrm_src_hash(&init_net, &m->old_daddr, &m->old_saddr, 
m->old_family); hlist_for_each_entry(x, entry, init_net.xfrm.state_bysrc+h, bysrc) { if (x->props.mode != m->mode || x->id.proto != m->proto) continue; if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr, m->old_family) || xfrm_addr_cmp(&x->props.saddr, &m->old_saddr, m->old_family)) continue; xfrm_state_hold(x); return x; } } return NULL; } EXPORT_SYMBOL(xfrm_migrate_state_find); struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x, struct xfrm_migrate *m) { struct xfrm_state *xc; int err; xc = xfrm_state_clone(x, &err); if (!xc) return NULL; memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr)); memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr)); /* add state */ if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) { /* a care is needed when the destination address of the state is to be updated as it is a part of triplet */ xfrm_state_insert(xc); } else { if ((err = xfrm_state_add(xc)) < 0) goto error; } return xc; error: xfrm_state_put(xc); return NULL; } EXPORT_SYMBOL(xfrm_state_migrate); #endif int xfrm_state_update(struct xfrm_state *x) { struct xfrm_state *x1, *to_put; int err; int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); to_put = NULL; spin_lock_bh(&xfrm_state_lock); x1 = __xfrm_state_locate(x, use_spi, x->props.family); err = -ESRCH; if (!x1) goto out; if (xfrm_state_kern(x1)) { to_put = x1; err = -EEXIST; goto out; } if (x1->km.state == XFRM_STATE_ACQ) { __xfrm_state_insert(x); x = NULL; } err = 0; out: spin_unlock_bh(&xfrm_state_lock); if (to_put) xfrm_state_put(to_put); if (err) return err; if (!x) { xfrm_state_delete(x1); xfrm_state_put(x1); return 0; } err = -EINVAL; spin_lock_bh(&x1->lock); if (likely(x1->km.state == XFRM_STATE_VALID)) { if (x->encap && x1->encap) memcpy(x1->encap, x->encap, sizeof(*x1->encap)); if (x->coaddr && x1->coaddr) { memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr)); } if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel))) memcpy(&x1->sel, &x->sel, sizeof(x1->sel)); 
memcpy(&x1->lft, &x->lft, sizeof(x1->lft)); x1->km.dying = 0; tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL); if (x1->curlft.use_time) xfrm_state_check_expire(x1); err = 0; x->km.state = XFRM_STATE_DEAD; __xfrm_state_put(x); } spin_unlock_bh(&x1->lock); xfrm_state_put(x1); return err; } EXPORT_SYMBOL(xfrm_state_update); int xfrm_state_check_expire(struct xfrm_state *x) { if (!x->curlft.use_time) x->curlft.use_time = get_seconds(); if (x->km.state != XFRM_STATE_VALID) return -EINVAL; if (x->curlft.bytes >= x->lft.hard_byte_limit || x->curlft.packets >= x->lft.hard_packet_limit) { x->km.state = XFRM_STATE_EXPIRED; tasklet_hrtimer_start(&x->mtimer, ktime_set(0,0), HRTIMER_MODE_REL); return -EINVAL; } if (!x->km.dying && (x->curlft.bytes >= x->lft.soft_byte_limit || x->curlft.packets >= x->lft.soft_packet_limit)) { x->km.dying = 1; km_state_expired(x, 0, 0); } return 0; } EXPORT_SYMBOL(xfrm_state_check_expire); struct xfrm_state * xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family) { struct xfrm_state *x; spin_lock_bh(&xfrm_state_lock); x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family); spin_unlock_bh(&xfrm_state_lock); return x; } EXPORT_SYMBOL(xfrm_state_lookup); struct xfrm_state * xfrm_state_lookup_byaddr(struct net *net, u32 mark, const xfrm_address_t *daddr, const xfrm_address_t *saddr, u8 proto, unsigned short family) { struct xfrm_state *x; spin_lock_bh(&xfrm_state_lock); x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family); spin_unlock_bh(&xfrm_state_lock); return x; } EXPORT_SYMBOL(xfrm_state_lookup_byaddr); struct xfrm_state * xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto, const xfrm_address_t *daddr, const xfrm_address_t *saddr, int create, unsigned short family) { struct xfrm_state *x; spin_lock_bh(&xfrm_state_lock); x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create); 
spin_unlock_bh(&xfrm_state_lock); return x; } EXPORT_SYMBOL(xfrm_find_acq); #ifdef CONFIG_XFRM_SUB_POLICY int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, unsigned short family) { int err = 0; struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); if (!afinfo) return -EAFNOSUPPORT; spin_lock_bh(&xfrm_state_lock); if (afinfo->tmpl_sort) err = afinfo->tmpl_sort(dst, src, n); spin_unlock_bh(&xfrm_state_lock); xfrm_state_put_afinfo(afinfo); return err; } EXPORT_SYMBOL(xfrm_tmpl_sort); int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, unsigned short family) { int err = 0; struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); if (!afinfo) return -EAFNOSUPPORT; spin_lock_bh(&xfrm_state_lock); if (afinfo->state_sort) err = afinfo->state_sort(dst, src, n); spin_unlock_bh(&xfrm_state_lock); xfrm_state_put_afinfo(afinfo); return err; } EXPORT_SYMBOL(xfrm_state_sort); #endif /* Silly enough, but I'm lazy to build resolution list */ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq) { int i; for (i = 0; i <= net->xfrm.state_hmask; i++) { struct hlist_node *entry; struct xfrm_state *x; hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) { if (x->km.seq == seq && (mark & x->mark.m) == x->mark.v && x->km.state == XFRM_STATE_ACQ) { xfrm_state_hold(x); return x; } } } return NULL; } struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq) { struct xfrm_state *x; spin_lock_bh(&xfrm_state_lock); x = __xfrm_find_acq_byseq(net, mark, seq); spin_unlock_bh(&xfrm_state_lock); return x; } EXPORT_SYMBOL(xfrm_find_acq_byseq); u32 xfrm_get_acqseq(void) { u32 res; static atomic_t acqseq; do { res = atomic_inc_return(&acqseq); } while (!res); return res; } EXPORT_SYMBOL(xfrm_get_acqseq); int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) { struct net *net = xs_net(x); unsigned int h; struct xfrm_state *x0; int err = -ENOENT; __be32 minspi = htonl(low); 
__be32 maxspi = htonl(high); u32 mark = x->mark.v & x->mark.m; spin_lock_bh(&x->lock); if (x->km.state == XFRM_STATE_DEAD) goto unlock; err = 0; if (x->id.spi) goto unlock; err = -ENOENT; if (minspi == maxspi) { x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family); if (x0) { xfrm_state_put(x0); goto unlock; } x->id.spi = minspi; } else { u32 spi = 0; for (h=0; h<high-low+1; h++) { spi = low + net_random()%(high-low+1); x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); if (x0 == NULL) { x->id.spi = htonl(spi); break; } xfrm_state_put(x0); } } if (x->id.spi) { spin_lock_bh(&xfrm_state_lock); h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); hlist_add_head(&x->byspi, net->xfrm.state_byspi+h); spin_unlock_bh(&xfrm_state_lock); err = 0; } unlock: spin_unlock_bh(&x->lock); return err; } EXPORT_SYMBOL(xfrm_alloc_spi); int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, int (*func)(struct xfrm_state *, int, void*), void *data) { struct xfrm_state *state; struct xfrm_state_walk *x; int err = 0; if (walk->seq != 0 && list_empty(&walk->all)) return 0; spin_lock_bh(&xfrm_state_lock); if (list_empty(&walk->all)) x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all); else x = list_entry(&walk->all, struct xfrm_state_walk, all); list_for_each_entry_from(x, &net->xfrm.state_all, all) { if (x->state == XFRM_STATE_DEAD) continue; state = container_of(x, struct xfrm_state, km); if (!xfrm_id_proto_match(state->id.proto, walk->proto)) continue; err = func(state, walk->seq, data); if (err) { list_move_tail(&walk->all, &x->all); goto out; } walk->seq++; } if (walk->seq == 0) { err = -ENOENT; goto out; } list_del_init(&walk->all); out: spin_unlock_bh(&xfrm_state_lock); return err; } EXPORT_SYMBOL(xfrm_state_walk); void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto) { INIT_LIST_HEAD(&walk->all); walk->proto = proto; walk->state = 
XFRM_STATE_DEAD; walk->seq = 0; } EXPORT_SYMBOL(xfrm_state_walk_init); void xfrm_state_walk_done(struct xfrm_state_walk *walk) { if (list_empty(&walk->all)) return; spin_lock_bh(&xfrm_state_lock); list_del(&walk->all); spin_unlock_bh(&xfrm_state_lock); } EXPORT_SYMBOL(xfrm_state_walk_done); static void xfrm_replay_timer_handler(unsigned long data) { struct xfrm_state *x = (struct xfrm_state*)data; spin_lock(&x->lock); if (x->km.state == XFRM_STATE_VALID) { if (xfrm_aevent_is_on(xs_net(x))) x->repl->notify(x, XFRM_REPLAY_TIMEOUT); else x->xflags |= XFRM_TIME_DEFER; } spin_unlock(&x->lock); } static LIST_HEAD(xfrm_km_list); static DEFINE_RWLOCK(xfrm_km_lock); void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) { struct xfrm_mgr *km; read_lock(&xfrm_km_lock); list_for_each_entry(km, &xfrm_km_list, list) if (km->notify_policy) km->notify_policy(xp, dir, c); read_unlock(&xfrm_km_lock); } void km_state_notify(struct xfrm_state *x, const struct km_event *c) { struct xfrm_mgr *km; read_lock(&xfrm_km_lock); list_for_each_entry(km, &xfrm_km_list, list) if (km->notify) km->notify(x, c); read_unlock(&xfrm_km_lock); } EXPORT_SYMBOL(km_policy_notify); EXPORT_SYMBOL(km_state_notify); void km_state_expired(struct xfrm_state *x, int hard, u32 pid) { struct net *net = xs_net(x); struct km_event c; c.data.hard = hard; c.pid = pid; c.event = XFRM_MSG_EXPIRE; km_state_notify(x, &c); if (hard) wake_up(&net->xfrm.km_waitq); } EXPORT_SYMBOL(km_state_expired); /* * We send to all registered managers regardless of failure * We are happy with one success */ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol) { int err = -EINVAL, acqret; struct xfrm_mgr *km; read_lock(&xfrm_km_lock); list_for_each_entry(km, &xfrm_km_list, list) { acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT); if (!acqret) err = acqret; } read_unlock(&xfrm_km_lock); return err; } EXPORT_SYMBOL(km_query); int km_new_mapping(struct xfrm_state *x, xfrm_address_t 
*ipaddr, __be16 sport) { int err = -EINVAL; struct xfrm_mgr *km; read_lock(&xfrm_km_lock); list_for_each_entry(km, &xfrm_km_list, list) { if (km->new_mapping) err = km->new_mapping(x, ipaddr, sport); if (!err) break; } read_unlock(&xfrm_km_lock); return err; } EXPORT_SYMBOL(km_new_mapping); void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid) { struct net *net = xp_net(pol); struct km_event c; c.data.hard = hard; c.pid = pid; c.event = XFRM_MSG_POLEXPIRE; km_policy_notify(pol, dir, &c); if (hard) wake_up(&net->xfrm.km_waitq); } EXPORT_SYMBOL(km_policy_expired); #ifdef CONFIG_XFRM_MIGRATE int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, const struct xfrm_migrate *m, int num_migrate, const struct xfrm_kmaddress *k) { int err = -EINVAL; int ret; struct xfrm_mgr *km; read_lock(&xfrm_km_lock); list_for_each_entry(km, &xfrm_km_list, list) { if (km->migrate) { ret = km->migrate(sel, dir, type, m, num_migrate, k); if (!ret) err = ret; } } read_unlock(&xfrm_km_lock); return err; } EXPORT_SYMBOL(km_migrate); #endif int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) { int err = -EINVAL; int ret; struct xfrm_mgr *km; read_lock(&xfrm_km_lock); list_for_each_entry(km, &xfrm_km_list, list) { if (km->report) { ret = km->report(net, proto, sel, addr); if (!ret) err = ret; } } read_unlock(&xfrm_km_lock); return err; } EXPORT_SYMBOL(km_report); int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen) { int err; u8 *data; struct xfrm_mgr *km; struct xfrm_policy *pol = NULL; if (optlen <= 0 || optlen > PAGE_SIZE) return -EMSGSIZE; data = kmalloc(optlen, GFP_KERNEL); if (!data) return -ENOMEM; err = -EFAULT; if (copy_from_user(data, optval, optlen)) goto out; err = -EINVAL; read_lock(&xfrm_km_lock); list_for_each_entry(km, &xfrm_km_list, list) { pol = km->compile_policy(sk, optname, data, optlen, &err); if (err >= 0) break; } read_unlock(&xfrm_km_lock); if (err >= 0) { 
xfrm_sk_policy_insert(sk, err, pol); xfrm_pol_put(pol); err = 0; } out: kfree(data); return err; } EXPORT_SYMBOL(xfrm_user_policy); int xfrm_register_km(struct xfrm_mgr *km) { write_lock_bh(&xfrm_km_lock); list_add_tail(&km->list, &xfrm_km_list); write_unlock_bh(&xfrm_km_lock); return 0; } EXPORT_SYMBOL(xfrm_register_km); int xfrm_unregister_km(struct xfrm_mgr *km) { write_lock_bh(&xfrm_km_lock); list_del(&km->list); write_unlock_bh(&xfrm_km_lock); return 0; } EXPORT_SYMBOL(xfrm_unregister_km); int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo) { int err = 0; if (unlikely(afinfo == NULL)) return -EINVAL; if (unlikely(afinfo->family >= NPROTO)) return -EAFNOSUPPORT; write_lock_bh(&xfrm_state_afinfo_lock); if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) err = -ENOBUFS; else xfrm_state_afinfo[afinfo->family] = afinfo; write_unlock_bh(&xfrm_state_afinfo_lock); return err; } EXPORT_SYMBOL(xfrm_state_register_afinfo); int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) { int err = 0; if (unlikely(afinfo == NULL)) return -EINVAL; if (unlikely(afinfo->family >= NPROTO)) return -EAFNOSUPPORT; write_lock_bh(&xfrm_state_afinfo_lock); if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) { if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo)) err = -EINVAL; else xfrm_state_afinfo[afinfo->family] = NULL; } write_unlock_bh(&xfrm_state_afinfo_lock); return err; } EXPORT_SYMBOL(xfrm_state_unregister_afinfo); static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) { struct xfrm_state_afinfo *afinfo; if (unlikely(family >= NPROTO)) return NULL; read_lock(&xfrm_state_afinfo_lock); afinfo = xfrm_state_afinfo[family]; if (unlikely(!afinfo)) read_unlock(&xfrm_state_afinfo_lock); return afinfo; } static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) __releases(xfrm_state_afinfo_lock) { read_unlock(&xfrm_state_afinfo_lock); } /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */ void 
xfrm_state_delete_tunnel(struct xfrm_state *x) { if (x->tunnel) { struct xfrm_state *t = x->tunnel; if (atomic_read(&t->tunnel_users) == 2) xfrm_state_delete(t); atomic_dec(&t->tunnel_users); xfrm_state_put(t); x->tunnel = NULL; } } EXPORT_SYMBOL(xfrm_state_delete_tunnel); int xfrm_state_mtu(struct xfrm_state *x, int mtu) { int res; spin_lock_bh(&x->lock); if (x->km.state == XFRM_STATE_VALID && x->type && x->type->get_mtu) res = x->type->get_mtu(x, mtu); else res = mtu - x->props.header_len; spin_unlock_bh(&x->lock); return res; } int __xfrm_init_state(struct xfrm_state *x, bool init_replay) { struct xfrm_state_afinfo *afinfo; struct xfrm_mode *inner_mode; int family = x->props.family; int err; err = -EAFNOSUPPORT; afinfo = xfrm_state_get_afinfo(family); if (!afinfo) goto error; err = 0; if (afinfo->init_flags) err = afinfo->init_flags(x); xfrm_state_put_afinfo(afinfo); if (err) goto error; err = -EPROTONOSUPPORT; if (x->sel.family != AF_UNSPEC) { inner_mode = xfrm_get_mode(x->props.mode, x->sel.family); if (inner_mode == NULL) goto error; if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) && family != x->sel.family) { xfrm_put_mode(inner_mode); goto error; } x->inner_mode = inner_mode; } else { struct xfrm_mode *inner_mode_iaf; int iafamily = AF_INET; inner_mode = xfrm_get_mode(x->props.mode, x->props.family); if (inner_mode == NULL) goto error; if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) { xfrm_put_mode(inner_mode); goto error; } x->inner_mode = inner_mode; if (x->props.family == AF_INET) iafamily = AF_INET6; inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily); if (inner_mode_iaf) { if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL) x->inner_mode_iaf = inner_mode_iaf; else xfrm_put_mode(inner_mode_iaf); } } x->type = xfrm_get_type(x->id.proto, family); if (x->type == NULL) goto error; err = x->type->init_state(x); if (err) goto error; x->outer_mode = xfrm_get_mode(x->props.mode, family); if (x->outer_mode == NULL) goto error; if (init_replay) { err = 
xfrm_init_replay(x); if (err) goto error; } x->km.state = XFRM_STATE_VALID; error: return err; } EXPORT_SYMBOL(__xfrm_init_state); int xfrm_init_state(struct xfrm_state *x) { return __xfrm_init_state(x, true); } EXPORT_SYMBOL(xfrm_init_state); int __net_init xfrm_state_init(struct net *net) { unsigned int sz; INIT_LIST_HEAD(&net->xfrm.state_all); sz = sizeof(struct hlist_head) * 8; net->xfrm.state_bydst = xfrm_hash_alloc(sz); if (!net->xfrm.state_bydst) goto out_bydst; net->xfrm.state_bysrc = xfrm_hash_alloc(sz); if (!net->xfrm.state_bysrc) goto out_bysrc; net->xfrm.state_byspi = xfrm_hash_alloc(sz); if (!net->xfrm.state_byspi) goto out_byspi; net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1); net->xfrm.state_num = 0; INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); INIT_HLIST_HEAD(&net->xfrm.state_gc_list); INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task); init_waitqueue_head(&net->xfrm.km_waitq); return 0; out_byspi: xfrm_hash_free(net->xfrm.state_bysrc, sz); out_bysrc: xfrm_hash_free(net->xfrm.state_bydst, sz); out_bydst: return -ENOMEM; } void xfrm_state_fini(struct net *net) { struct xfrm_audit audit_info; unsigned int sz; flush_work(&net->xfrm.state_hash_work); audit_info.loginuid = -1; audit_info.sessionid = -1; audit_info.secid = 0; xfrm_state_flush(net, IPSEC_PROTO_ANY, &audit_info); flush_work(&net->xfrm.state_gc_work); WARN_ON(!list_empty(&net->xfrm.state_all)); sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head); WARN_ON(!hlist_empty(net->xfrm.state_byspi)); xfrm_hash_free(net->xfrm.state_byspi, sz); WARN_ON(!hlist_empty(net->xfrm.state_bysrc)); xfrm_hash_free(net->xfrm.state_bysrc, sz); WARN_ON(!hlist_empty(net->xfrm.state_bydst)); xfrm_hash_free(net->xfrm.state_bydst, sz); } #ifdef CONFIG_AUDITSYSCALL static void xfrm_audit_helper_sainfo(struct xfrm_state *x, struct audit_buffer *audit_buf) { struct xfrm_sec_ctx *ctx = x->security; u32 spi = ntohl(x->id.spi); if (ctx) audit_log_format(audit_buf, " sec_alg=%u 
sec_doi=%u sec_obj=%s", ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); switch(x->props.family) { case AF_INET: audit_log_format(audit_buf, " src=%pI4 dst=%pI4", &x->props.saddr.a4, &x->id.daddr.a4); break; case AF_INET6: audit_log_format(audit_buf, " src=%pI6 dst=%pI6", x->props.saddr.a6, x->id.daddr.a6); break; } audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi); } static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family, struct audit_buffer *audit_buf) { const struct iphdr *iph4; const struct ipv6hdr *iph6; switch (family) { case AF_INET: iph4 = ip_hdr(skb); audit_log_format(audit_buf, " src=%pI4 dst=%pI4", &iph4->saddr, &iph4->daddr); break; case AF_INET6: iph6 = ipv6_hdr(skb); audit_log_format(audit_buf, " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x", &iph6->saddr,&iph6->daddr, iph6->flow_lbl[0] & 0x0f, iph6->flow_lbl[1], iph6->flow_lbl[2]); break; } } void xfrm_audit_state_add(struct xfrm_state *x, int result, uid_t auid, u32 sessionid, u32 secid) { struct audit_buffer *audit_buf; audit_buf = xfrm_audit_start("SAD-add"); if (audit_buf == NULL) return; xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); xfrm_audit_helper_sainfo(x, audit_buf); audit_log_format(audit_buf, " res=%u", result); audit_log_end(audit_buf); } EXPORT_SYMBOL_GPL(xfrm_audit_state_add); void xfrm_audit_state_delete(struct xfrm_state *x, int result, uid_t auid, u32 sessionid, u32 secid) { struct audit_buffer *audit_buf; audit_buf = xfrm_audit_start("SAD-delete"); if (audit_buf == NULL) return; xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); xfrm_audit_helper_sainfo(x, audit_buf); audit_log_format(audit_buf, " res=%u", result); audit_log_end(audit_buf); } EXPORT_SYMBOL_GPL(xfrm_audit_state_delete); void xfrm_audit_state_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) { struct audit_buffer *audit_buf; u32 spi; audit_buf = xfrm_audit_start("SA-replay-overflow"); if (audit_buf == NULL) return; xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf); 
/* don't record the sequence number because it's inherent in this kind * of audit message */ spi = ntohl(x->id.spi); audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi); audit_log_end(audit_buf); } EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow); void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq) { struct audit_buffer *audit_buf; u32 spi; audit_buf = xfrm_audit_start("SA-replayed-pkt"); if (audit_buf == NULL) return; xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf); spi = ntohl(x->id.spi); audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u", spi, spi, ntohl(net_seq)); audit_log_end(audit_buf); } EXPORT_SYMBOL_GPL(xfrm_audit_state_replay); void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family) { struct audit_buffer *audit_buf; audit_buf = xfrm_audit_start("SA-notfound"); if (audit_buf == NULL) return; xfrm_audit_helper_pktinfo(skb, family, audit_buf); audit_log_end(audit_buf); } EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple); void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi, __be32 net_seq) { struct audit_buffer *audit_buf; u32 spi; audit_buf = xfrm_audit_start("SA-notfound"); if (audit_buf == NULL) return; xfrm_audit_helper_pktinfo(skb, family, audit_buf); spi = ntohl(net_spi); audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u", spi, spi, ntohl(net_seq)); audit_log_end(audit_buf); } EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound); void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb, u8 proto) { struct audit_buffer *audit_buf; __be32 net_spi; __be32 net_seq; audit_buf = xfrm_audit_start("SA-icv-failure"); if (audit_buf == NULL) return; xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf); if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) { u32 spi = ntohl(net_spi); audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u", spi, spi, ntohl(net_seq)); } audit_log_end(audit_buf); } EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail); #endif /* 
CONFIG_AUDITSYSCALL */
gpl-2.0
AttiJeong98/Elf-Kernel_M250S_JB
net/ipv6/xfrm6_state.c
2944
4758
/* * xfrm6_state.c: based on xfrm4_state.c * * Authors: * Mitsuru KANDA @USAGI * Kazunori MIYAZAWA @USAGI * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * IPv6 support * YOSHIFUJI Hideaki @USAGI * Split up af-specific portion * */ #include <net/xfrm.h> #include <linux/pfkeyv2.h> #include <linux/ipsec.h> #include <linux/netfilter_ipv6.h> #include <net/dsfield.h> #include <net/ipv6.h> #include <net/addrconf.h> static void __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi6 *fl6 = &fl->u.ip6; /* Initialize temporary selector matching only * to current session. */ ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl6->daddr); ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl6->saddr); sel->dport = xfrm_flowi_dport(fl, &fl6->uli); sel->dport_mask = htons(0xffff); sel->sport = xfrm_flowi_sport(fl, &fl6->uli); sel->sport_mask = htons(0xffff); sel->family = AF_INET6; sel->prefixlen_d = 128; sel->prefixlen_s = 128; sel->proto = fl6->flowi6_proto; sel->ifindex = fl6->flowi6_oif; } static void xfrm6_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl, const xfrm_address_t *daddr, const xfrm_address_t *saddr) { x->id = tmpl->id; if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr)); if (ipv6_addr_any((struct in6_addr*)&x->props.saddr)) memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr)); x->props.mode = tmpl->mode; x->props.reqid = tmpl->reqid; x->props.family = AF_INET6; } /* distribution counting sort function for xfrm_state and xfrm_tmpl */ static int __xfrm6_sort(void **dst, void **src, int n, int (*cmp)(void *p), int maxclass) { int i; int class[XFRM_MAX_DEPTH]; int count[maxclass]; memset(count, 0, sizeof(count)); for (i = 0; i < n; i++) { int c; class[i] = c = cmp(src[i]); count[c]++; } for (i = 2; i < maxclass; i++) count[i] += count[i - 1]; for (i = 0; i < n; i++) { dst[count[class[i] - 1]++] = src[i]; 
src[i] = NULL; } return 0; } /* * Rule for xfrm_state: * * rule 1: select IPsec transport except AH * rule 2: select MIPv6 RO or inbound trigger * rule 3: select IPsec transport AH * rule 4: select IPsec tunnel * rule 5: others */ static int __xfrm6_state_sort_cmp(void *p) { struct xfrm_state *v = p; switch (v->props.mode) { case XFRM_MODE_TRANSPORT: if (v->id.proto != IPPROTO_AH) return 1; else return 3; #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case XFRM_MODE_ROUTEOPTIMIZATION: case XFRM_MODE_IN_TRIGGER: return 2; #endif case XFRM_MODE_TUNNEL: case XFRM_MODE_BEET: return 4; } return 5; } static int __xfrm6_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n) { return __xfrm6_sort((void **)dst, (void **)src, n, __xfrm6_state_sort_cmp, 6); } /* * Rule for xfrm_tmpl: * * rule 1: select IPsec transport * rule 2: select MIPv6 RO or inbound trigger * rule 3: select IPsec tunnel * rule 4: others */ static int __xfrm6_tmpl_sort_cmp(void *p) { struct xfrm_tmpl *v = p; switch (v->mode) { case XFRM_MODE_TRANSPORT: return 1; #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case XFRM_MODE_ROUTEOPTIMIZATION: case XFRM_MODE_IN_TRIGGER: return 2; #endif case XFRM_MODE_TUNNEL: case XFRM_MODE_BEET: return 3; } return 4; } static int __xfrm6_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n) { return __xfrm6_sort((void **)dst, (void **)src, n, __xfrm6_tmpl_sort_cmp, 5); } int xfrm6_extract_header(struct sk_buff *skb) { struct ipv6hdr *iph = ipv6_hdr(skb); XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph); XFRM_MODE_SKB_CB(skb)->id = 0; XFRM_MODE_SKB_CB(skb)->frag_off = htons(IP_DF); XFRM_MODE_SKB_CB(skb)->tos = ipv6_get_dsfield(iph); XFRM_MODE_SKB_CB(skb)->ttl = iph->hop_limit; XFRM_MODE_SKB_CB(skb)->optlen = 0; memcpy(XFRM_MODE_SKB_CB(skb)->flow_lbl, iph->flow_lbl, sizeof(XFRM_MODE_SKB_CB(skb)->flow_lbl)); return 0; } static struct xfrm_state_afinfo xfrm6_state_afinfo = { .family = AF_INET6, .proto = IPPROTO_IPV6, 
.eth_proto = htons(ETH_P_IPV6), .owner = THIS_MODULE, .init_tempsel = __xfrm6_init_tempsel, .init_temprop = xfrm6_init_temprop, .tmpl_sort = __xfrm6_tmpl_sort, .state_sort = __xfrm6_state_sort, .output = xfrm6_output, .output_finish = xfrm6_output_finish, .extract_input = xfrm6_extract_input, .extract_output = xfrm6_extract_output, .transport_finish = xfrm6_transport_finish, }; int __init xfrm6_state_init(void) { return xfrm_state_register_afinfo(&xfrm6_state_afinfo); } void xfrm6_state_fini(void) { xfrm_state_unregister_afinfo(&xfrm6_state_afinfo); }
gpl-2.0
paulocastro31/android_kernel_motorola_msm8226
drivers/infiniband/hw/nes/nes.c
4992
34385
/* * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/in.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/if_arp.h> #include <linux/highmem.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> #include <rdma/ib_smi.h> #include <rdma/ib_verbs.h> #include <rdma/ib_pack.h> #include <rdma/iw_cm.h> #include "nes.h" #include <net/netevent.h> #include <net/neighbour.h> #include <linux/route.h> #include <net/ip_fib.h> MODULE_AUTHOR("NetEffect"); MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); int max_mtu = 9000; int interrupt_mod_interval = 0; /* Interoperability */ int mpa_version = 1; module_param(mpa_version, int, 0644); MODULE_PARM_DESC(mpa_version, "MPA version to be used int MPA Req/Resp (0 or 1)"); /* Interoperability */ int disable_mpa_crc = 0; module_param(disable_mpa_crc, int, 0644); MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC"); unsigned int send_first = 0; module_param(send_first, int, 0644); MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection"); unsigned int nes_drv_opt = NES_DRV_OPT_DISABLE_INT_MOD | NES_DRV_OPT_ENABLE_PAU; module_param(nes_drv_opt, int, 0644); MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters"); unsigned int nes_debug_level = 0; module_param_named(debug_level, nes_debug_level, uint, 0644); MODULE_PARM_DESC(debug_level, "Enable debug output level"); unsigned int wqm_quanta = 0x10000; module_param(wqm_quanta, int, 0644); MODULE_PARM_DESC(wqm_quanta, "WQM quanta"); static bool limit_maxrdreqsz; module_param(limit_maxrdreqsz, bool, 0644); MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes"); LIST_HEAD(nes_adapter_list); static LIST_HEAD(nes_dev_list); atomic_t 
qps_destroyed; static unsigned int ee_flsh_adapter; static unsigned int sysfs_nonidx_addr; static unsigned int sysfs_idx_addr; static struct pci_device_id nes_pci_table[] = { { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020), }, { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR), }, {0} }; MODULE_DEVICE_TABLE(pci, nes_pci_table); static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *); static int nes_net_event(struct notifier_block *, unsigned long, void *); static int nes_notifiers_registered; static struct notifier_block nes_inetaddr_notifier = { .notifier_call = nes_inetaddr_event }; static struct notifier_block nes_net_notifier = { .notifier_call = nes_net_event }; /** * nes_inetaddr_event */ static int nes_inetaddr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct in_ifaddr *ifa = ptr; struct net_device *event_netdev = ifa->ifa_dev->dev; struct nes_device *nesdev; struct net_device *netdev; struct nes_vnic *nesvnic; unsigned int is_bonded; nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %pI4, netmask %pI4.\n", &ifa->ifa_address, &ifa->ifa_mask); list_for_each_entry(nesdev, &nes_dev_list, list) { nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. 
(%s)\n", nesdev, nesdev->netdev[0]->name); netdev = nesdev->netdev[0]; nesvnic = netdev_priv(netdev); is_bonded = netif_is_bond_slave(netdev) && (netdev->master == event_netdev); if ((netdev == event_netdev) || is_bonded) { if (nesvnic->rdma_enabled == 0) { nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since" " RDMA is not enabled.\n", netdev->name); return NOTIFY_OK; } /* we have ifa->ifa_address/mask here if we need it */ switch (event) { case NETDEV_DOWN: nes_debug(NES_DBG_NETDEV, "event:DOWN\n"); nes_write_indexed(nesdev, NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), 0); nes_manage_arp_cache(netdev, netdev->dev_addr, ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE); nesvnic->local_ipaddr = 0; if (is_bonded) continue; else return NOTIFY_OK; break; case NETDEV_UP: nes_debug(NES_DBG_NETDEV, "event:UP\n"); if (nesvnic->local_ipaddr != 0) { nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n"); return NOTIFY_OK; } /* fall through */ case NETDEV_CHANGEADDR: /* Add the address to the IP table */ if (netdev->master) nesvnic->local_ipaddr = ((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address; else nesvnic->local_ipaddr = ifa->ifa_address; nes_write_indexed(nesdev, NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), ntohl(nesvnic->local_ipaddr)); nes_manage_arp_cache(netdev, netdev->dev_addr, ntohl(nesvnic->local_ipaddr), NES_ARP_ADD); if (is_bonded) continue; else return NOTIFY_OK; break; default: break; } } } return NOTIFY_DONE; } /** * nes_net_event */ static int nes_net_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct neighbour *neigh = ptr; struct nes_device *nesdev; struct net_device *netdev; struct nes_vnic *nesvnic; switch (event) { case NETEVENT_NEIGH_UPDATE: list_for_each_entry(nesdev, &nes_dev_list, list) { /* nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p.\n", nesdev); */ netdev = nesdev->netdev[0]; nesvnic = netdev_priv(netdev); if (netdev == neigh->dev) 
{ if (nesvnic->rdma_enabled == 0) { nes_debug(NES_DBG_NETDEV, "Skipping device %s since no RDMA\n", netdev->name); } else { if (neigh->nud_state & NUD_VALID) { nes_manage_arp_cache(neigh->dev, neigh->ha, ntohl(*(__be32 *)neigh->primary_key), NES_ARP_ADD); } else { nes_manage_arp_cache(neigh->dev, neigh->ha, ntohl(*(__be32 *)neigh->primary_key), NES_ARP_DELETE); } } return NOTIFY_OK; } } break; default: nes_debug(NES_DBG_NETDEV, "NETEVENT_ %lu undefined\n", event); break; } return NOTIFY_DONE; } /** * nes_add_ref */ void nes_add_ref(struct ib_qp *ibqp) { struct nes_qp *nesqp; nesqp = to_nesqp(ibqp); nes_debug(NES_DBG_QP, "Bumping refcount for QP%u. Pre-inc value = %u\n", ibqp->qp_num, atomic_read(&nesqp->refcount)); atomic_inc(&nesqp->refcount); } static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request) { unsigned long flags; struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; struct nes_adapter *nesadapter = nesdev->nesadapter; atomic_inc(&qps_destroyed); /* Free the control structures */ if (nesqp->pbl_vbase) { pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase); spin_lock_irqsave(&nesadapter->pbl_lock, flags); nesadapter->free_256pbl++; spin_unlock_irqrestore(&nesadapter->pbl_lock, flags); pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase); nesqp->pbl_vbase = NULL; } else { pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase); } nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id); nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL; kfree(nesqp->allocated_buffer); } /** * nes_rem_ref */ void nes_rem_ref(struct ib_qp *ibqp) { u64 u64temp; struct nes_qp *nesqp; struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); struct nes_device *nesdev = nesvnic->nesdev; struct nes_hw_cqp_wqe *cqp_wqe; struct nes_cqp_request *cqp_request; u32 opcode; nesqp = to_nesqp(ibqp); if 
(atomic_read(&nesqp->refcount) == 0) { printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n", __func__, ibqp->qp_num, nesqp->last_aeq); BUG(); } if (atomic_dec_and_test(&nesqp->refcount)) { if (nesqp->pau_mode) nes_destroy_pau_qp(nesdev, nesqp); /* Destroy the QP */ cqp_request = nes_get_cqp_request(nesdev); if (cqp_request == NULL) { nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n"); return; } cqp_request->waiting = 0; cqp_request->callback = 1; cqp_request->cqp_callback = nes_cqp_rem_ref_callback; cqp_request->cqp_callback_pointer = nesqp; cqp_wqe = &cqp_request->cqp_wqe; nes_fill_init_cqp_wqe(cqp_wqe, nesdev); opcode = NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_IWARP; if (nesqp->hte_added) { opcode |= NES_CQP_QP_DEL_HTE; nesqp->hte_added = 0; } set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode); set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); u64temp = (u64)nesqp->nesqp_context_pbase; set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp); nes_post_cqp_request(nesdev, cqp_request); } } /** * nes_get_qp */ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn) { struct nes_vnic *nesvnic = to_nesvnic(device); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp))) return NULL; return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp; } /** * nes_print_macaddr */ static void nes_print_macaddr(struct net_device *netdev) { nes_debug(NES_DBG_INIT, "%s: %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq); } /** * nes_interrupt - handle interrupts */ static irqreturn_t nes_interrupt(int irq, void *dev_id) { struct nes_device *nesdev = (struct nes_device *)dev_id; int handled = 0; u32 int_mask; u32 int_req; u32 int_stat; u32 intf_int_stat; u32 timer_stat; if (nesdev->msi_enabled) { /* No need to read the interrupt pending register if 
msi is enabled */ handled = 1; } else { if (unlikely(nesdev->nesadapter->hw_rev == NE020_REV)) { /* Master interrupt enable provides synchronization for kicking off bottom half when interrupt sharing is going on */ int_mask = nes_read32(nesdev->regs + NES_INT_MASK); if (int_mask & 0x80000000) { /* Check interrupt status to see if this might be ours */ int_stat = nes_read32(nesdev->regs + NES_INT_STAT); int_req = nesdev->int_req; if (int_stat&int_req) { /* if interesting CEQ or AEQ is pending, claim the interrupt */ if ((int_stat&int_req) & (~(NES_INT_TIMER|NES_INT_INTF))) { handled = 1; } else { if (((int_stat & int_req) & NES_INT_TIMER) == NES_INT_TIMER) { /* Timer might be running but might be for another function */ timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT); if ((timer_stat & nesdev->timer_int_req) != 0) { handled = 1; } } if ((((int_stat & int_req) & NES_INT_INTF) == NES_INT_INTF) && (handled == 0)) { intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT); if ((intf_int_stat & nesdev->intf_int_req) != 0) { handled = 1; } } } if (handled) { nes_write32(nesdev->regs+NES_INT_MASK, int_mask & (~0x80000000)); int_mask = nes_read32(nesdev->regs+NES_INT_MASK); /* Save off the status to save an additional read */ nesdev->int_stat = int_stat; nesdev->napi_isr_ran = 1; } } } } else { handled = nes_read32(nesdev->regs+NES_INT_PENDING); } } if (handled) { if (nes_napi_isr(nesdev) == 0) { tasklet_schedule(&nesdev->dpc_tasklet); } return IRQ_HANDLED; } else { return IRQ_NONE; } } /** * nes_probe - Device initialization */ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) { struct net_device *netdev = NULL; struct nes_device *nesdev = NULL; int ret = 0; void __iomem *mmio_regs = NULL; u8 hw_rev; assert(pcidev != NULL); assert(ent != NULL); printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. 
(%s)\n", DRV_VERSION, pci_name(pcidev)); ret = pci_enable_device(pcidev); if (ret) { printk(KERN_ERR PFX "Unable to enable PCI device. (%s)\n", pci_name(pcidev)); goto bail0; } nes_debug(NES_DBG_INIT, "BAR0 (@0x%08lX) size = 0x%lX bytes\n", (long unsigned int)pci_resource_start(pcidev, BAR_0), (long unsigned int)pci_resource_len(pcidev, BAR_0)); nes_debug(NES_DBG_INIT, "BAR1 (@0x%08lX) size = 0x%lX bytes\n", (long unsigned int)pci_resource_start(pcidev, BAR_1), (long unsigned int)pci_resource_len(pcidev, BAR_1)); /* Make sure PCI base addr are MMIO */ if (!(pci_resource_flags(pcidev, BAR_0) & IORESOURCE_MEM) || !(pci_resource_flags(pcidev, BAR_1) & IORESOURCE_MEM)) { printk(KERN_ERR PFX "PCI regions not an MMIO resource\n"); ret = -ENODEV; goto bail1; } /* Reserve PCI I/O and memory resources */ ret = pci_request_regions(pcidev, DRV_NAME); if (ret) { printk(KERN_ERR PFX "Unable to request regions. (%s)\n", pci_name(pcidev)); goto bail1; } if ((sizeof(dma_addr_t) > 4)) { ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); if (ret < 0) { printk(KERN_ERR PFX "64b DMA mask configuration failed\n"); goto bail2; } ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); if (ret) { printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n"); goto bail2; } } else { ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); if (ret < 0) { printk(KERN_ERR PFX "32b DMA mask configuration failed\n"); goto bail2; } ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); if (ret) { printk(KERN_ERR PFX "32b DMA consistent mask configuration failed\n"); goto bail2; } } pci_set_master(pcidev); /* Allocate hardware structure */ nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL); if (!nesdev) { printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev)); ret = -ENOMEM; goto bail2; } nes_debug(NES_DBG_INIT, "Allocated nes device at %p\n", nesdev); nesdev->pcidev = pcidev; pci_set_drvdata(pcidev, nesdev); pci_read_config_byte(pcidev, 0x0008, &hw_rev); 
nes_debug(NES_DBG_INIT, "hw_rev=%u\n", hw_rev); spin_lock_init(&nesdev->indexed_regs_lock); /* Remap the PCI registers in adapter BAR0 to kernel VA space */ mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), pci_resource_len(pcidev, BAR_0)); if (mmio_regs == NULL) { printk(KERN_ERR PFX "Unable to remap BAR0\n"); ret = -EIO; goto bail3; } nesdev->regs = mmio_regs; nesdev->index_reg = 0x50 + (PCI_FUNC(pcidev->devfn)*8) + mmio_regs; /* Ensure interrupts are disabled */ nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff); if (nes_drv_opt & NES_DRV_OPT_ENABLE_MSI) { if (!pci_enable_msi(nesdev->pcidev)) { nesdev->msi_enabled = 1; nes_debug(NES_DBG_INIT, "MSI is enabled for device %s\n", pci_name(pcidev)); } else { nes_debug(NES_DBG_INIT, "MSI is disabled by linux for device %s\n", pci_name(pcidev)); } } else { nes_debug(NES_DBG_INIT, "MSI not requested due to driver options for device %s\n", pci_name(pcidev)); } nesdev->csr_start = pci_resource_start(nesdev->pcidev, BAR_0); nesdev->doorbell_region = pci_resource_start(nesdev->pcidev, BAR_1); /* Init the adapter */ nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); if (!nesdev->nesadapter) { printk(KERN_ERR PFX "Unable to initialize adapter.\n"); ret = -ENOMEM; goto bail5; } nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; nesdev->nesadapter->wqm_quanta = wqm_quanta; /* nesdev->base_doorbell_index = nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ nesdev->base_doorbell_index = 1; nesdev->doorbell_start = nesdev->nesadapter->doorbell_start; if (nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { switch (PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count) { case 1: nesdev->mac_index = 2; break; case 2: nesdev->mac_index = 1; break; case 3: nesdev->mac_index = 3; break; case 0: default: nesdev->mac_index = 0; } } else { nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count; } if ((limit_maxrdreqsz || 
((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) && (hw_rev == NE020_REV1))) && (pcie_get_readrq(pcidev) > 256)) { if (pcie_set_readrq(pcidev, 256)) printk(KERN_ERR PFX "Unable to set max read request" " to 256 bytes\n"); else nes_debug(NES_DBG_INIT, "Max read request size set" " to 256 bytes\n"); } tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev); /* bring up the Control QP */ if (nes_init_cqp(nesdev)) { ret = -ENODEV; goto bail6; } /* Arm the CCQ */ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | PCI_FUNC(nesdev->pcidev->devfn)); nes_read32(nesdev->regs+NES_CQE_ALLOC); /* Enable the interrupts */ nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) | (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16)); if (PCI_FUNC(nesdev->pcidev->devfn) < 4) { nesdev->int_req |= (1 << (PCI_FUNC(nesdev->mac_index)+24)); } /* TODO: This really should be the first driver to load, not function 0 */ if (PCI_FUNC(nesdev->pcidev->devfn) == 0) { /* pick up PCI and critical errors if the first driver to load */ nesdev->intf_int_req = NES_INTF_INT_PCIERR | NES_INTF_INT_CRITERR; nesdev->int_req |= NES_INT_INTF; } else { nesdev->intf_int_req = 0; } nesdev->intf_int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16)); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0, 0); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 0); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS2, 0x00001265); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS4, 0x18021804); nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS3, 0x17801790); /* deal with both periodic and one_shot */ nesdev->timer_int_req = 0x101 << PCI_FUNC(nesdev->pcidev->devfn); nesdev->nesadapter->timer_int_req |= nesdev->timer_int_req; nes_debug(NES_DBG_INIT, "setting int_req for function %u, nesdev = 0x%04X, adapter = 0x%04X\n", PCI_FUNC(nesdev->pcidev->devfn), nesdev->timer_int_req, nesdev->nesadapter->timer_int_req); nes_write32(nesdev->regs+NES_INTF_INT_MASK, 
~(nesdev->intf_int_req)); list_add_tail(&nesdev->list, &nes_dev_list); /* Request an interrupt line for the driver */ ret = request_irq(pcidev->irq, nes_interrupt, IRQF_SHARED, DRV_NAME, nesdev); if (ret) { printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n", pci_name(pcidev), pcidev->irq); goto bail65; } nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); if (nes_notifiers_registered == 0) { register_inetaddr_notifier(&nes_inetaddr_notifier); register_netevent_notifier(&nes_net_notifier); } nes_notifiers_registered++; INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); /* Initialize network devices */ if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) goto bail7; /* Register network device */ ret = register_netdev(netdev); if (ret) { printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret); nes_netdev_destroy(netdev); goto bail7; } nes_print_macaddr(netdev); nesdev->netdev_count++; nesdev->nesadapter->netdev_count++; printk(KERN_INFO PFX "%s: NetEffect RNIC driver successfully loaded.\n", pci_name(pcidev)); return 0; bail7: printk(KERN_ERR PFX "bail7\n"); while (nesdev->netdev_count > 0) { nesdev->netdev_count--; nesdev->nesadapter->netdev_count--; unregister_netdev(nesdev->netdev[nesdev->netdev_count]); nes_netdev_destroy(nesdev->netdev[nesdev->netdev_count]); } nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n", nesdev->netdev_count, nesdev->nesadapter->netdev_count); nes_notifiers_registered--; if (nes_notifiers_registered == 0) { unregister_netevent_notifier(&nes_net_notifier); unregister_inetaddr_notifier(&nes_inetaddr_notifier); } list_del(&nesdev->list); nes_destroy_cqp(nesdev); bail65: printk(KERN_ERR PFX "bail65\n"); free_irq(pcidev->irq, nesdev); if (nesdev->msi_enabled) { pci_disable_msi(pcidev); } bail6: printk(KERN_ERR PFX "bail6\n"); tasklet_kill(&nesdev->dpc_tasklet); /* Deallocate the Adapter Structure */ nes_destroy_adapter(nesdev->nesadapter); bail5: printk(KERN_ERR PFX "bail5\n"); 
iounmap(nesdev->regs); bail3: printk(KERN_ERR PFX "bail3\n"); kfree(nesdev); bail2: pci_release_regions(pcidev); bail1: pci_disable_device(pcidev); bail0: return ret; } /** * nes_remove - unload from kernel */ static void __devexit nes_remove(struct pci_dev *pcidev) { struct nes_device *nesdev = pci_get_drvdata(pcidev); struct net_device *netdev; int netdev_index = 0; unsigned long flags; if (nesdev->netdev_count) { netdev = nesdev->netdev[netdev_index]; if (netdev) { netif_stop_queue(netdev); unregister_netdev(netdev); nes_netdev_destroy(netdev); nesdev->netdev[netdev_index] = NULL; nesdev->netdev_count--; nesdev->nesadapter->netdev_count--; } } nes_notifiers_registered--; if (nes_notifiers_registered == 0) { unregister_netevent_notifier(&nes_net_notifier); unregister_inetaddr_notifier(&nes_inetaddr_notifier); } list_del(&nesdev->list); nes_destroy_cqp(nesdev); free_irq(pcidev->irq, nesdev); tasklet_kill(&nesdev->dpc_tasklet); spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); if (nesdev->link_recheck) { spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); cancel_delayed_work_sync(&nesdev->work); } else { spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); } /* Deallocate the Adapter Structure */ nes_destroy_adapter(nesdev->nesadapter); if (nesdev->msi_enabled) { pci_disable_msi(pcidev); } iounmap(nesdev->regs); kfree(nesdev); /* nes_debug(NES_DBG_SHUTDOWN, "calling pci_release_regions.\n"); */ pci_release_regions(pcidev); pci_disable_device(pcidev); pci_set_drvdata(pcidev, NULL); } static struct pci_driver nes_pci_driver = { .name = DRV_NAME, .id_table = nes_pci_table, .probe = nes_probe, .remove = __devexit_p(nes_remove), }; static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf) { unsigned int devfn = 0xffffffff; unsigned char bus_number = 0xff; unsigned int i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { devfn = nesdev->pcidev->devfn; bus_number = 
nesdev->pcidev->bus->number; break; } i++; } return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn); } static ssize_t nes_store_adapter(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; ee_flsh_adapter = simple_strtoul(p, &p, 10); return strnlen(buf, count); } static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf) { u32 eeprom_cmd = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { eeprom_cmd = nes_read32(nesdev->regs + NES_EEPROM_COMMAND); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd); } static ssize_t nes_store_ee_cmd(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + NES_EEPROM_COMMAND, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf) { u32 eeprom_data = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { eeprom_data = nes_read32(nesdev->regs + NES_EEPROM_DATA); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data); } static ssize_t nes_store_ee_data(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + NES_EEPROM_DATA, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf) { u32 flash_cmd = 0xdead; u32 i = 0; struct nes_device 
*nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { flash_cmd = nes_read32(nesdev->regs + NES_FLASH_COMMAND); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd); } static ssize_t nes_store_flash_cmd(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + NES_FLASH_COMMAND, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf) { u32 flash_data = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { flash_data = nes_read32(nesdev->regs + NES_FLASH_DATA); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data); } static ssize_t nes_store_flash_data(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + NES_FLASH_DATA, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr); } static ssize_t nes_store_nonidx_addr(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') sysfs_nonidx_addr = simple_strtoul(p, &p, 16); return strnlen(buf, count); } static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf) { u32 nonidx_data = 0xdead; u32 i = 0; struct nes_device *nesdev; 
list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nonidx_data = nes_read32(nesdev->regs + sysfs_nonidx_addr); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data); } static ssize_t nes_store_nonidx_data(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write32(nesdev->regs + sysfs_nonidx_addr, val); break; } i++; } } return strnlen(buf, count); } static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr); } static ssize_t nes_store_idx_addr(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') sysfs_idx_addr = simple_strtoul(p, &p, 16); return strnlen(buf, count); } static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf) { u32 idx_data = 0xdead; u32 i = 0; struct nes_device *nesdev; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { idx_data = nes_read_indexed(nesdev, sysfs_idx_addr); break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data); } static ssize_t nes_store_idx_data(struct device_driver *ddp, const char *buf, size_t count) { char *p = (char *)buf; u32 val; u32 i = 0; struct nes_device *nesdev; if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { val = simple_strtoul(p, &p, 16); list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nes_write_indexed(nesdev, sysfs_idx_addr, val); break; } i++; } } return strnlen(buf, count); } /** * nes_show_wqm_quanta */ static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf) { u32 wqm_quanta_value = 0xdead; u32 i = 0; struct nes_device *nesdev; 
list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { wqm_quanta_value = nesdev->nesadapter->wqm_quanta; break; } i++; } return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value); } /** * nes_store_wqm_quanta */ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp, const char *buf, size_t count) { unsigned long wqm_quanta_value; u32 wqm_config1; u32 i = 0; struct nes_device *nesdev; if (kstrtoul(buf, 0, &wqm_quanta_value) < 0) return -EINVAL; list_for_each_entry(nesdev, &nes_dev_list, list) { if (i == ee_flsh_adapter) { nesdev->nesadapter->wqm_quanta = wqm_quanta_value; wqm_config1 = nes_read_indexed(nesdev, NES_IDX_WQM_CONFIG1); nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1, ((wqm_quanta_value << 1) | (wqm_config1 & 0x00000001))); break; } i++; } return strnlen(buf, count); } static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR, nes_show_adapter, nes_store_adapter); static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR, nes_show_ee_cmd, nes_store_ee_cmd); static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR, nes_show_ee_data, nes_store_ee_data); static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR, nes_show_flash_cmd, nes_store_flash_cmd); static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR, nes_show_flash_data, nes_store_flash_data); static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR, nes_show_nonidx_addr, nes_store_nonidx_addr); static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR, nes_show_nonidx_data, nes_store_nonidx_data); static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR, nes_show_idx_addr, nes_store_idx_addr); static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR, nes_show_idx_data, nes_store_idx_data); static DRIVER_ATTR(wqm_quanta, S_IRUSR | S_IWUSR, nes_show_wqm_quanta, nes_store_wqm_quanta); static int nes_create_driver_sysfs(struct pci_driver *drv) { int error; error = driver_create_file(&drv->driver, &driver_attr_adapter); error |= driver_create_file(&drv->driver, &driver_attr_eeprom_cmd); error |= driver_create_file(&drv->driver, 
&driver_attr_eeprom_data);
	/* Remaining driver-level sysfs attribute files.  Errors are OR-ed
	 * together; callers only test the sign of the result. */
	error |= driver_create_file(&drv->driver, &driver_attr_flash_cmd);
	error |= driver_create_file(&drv->driver, &driver_attr_flash_data);
	error |= driver_create_file(&drv->driver, &driver_attr_nonidx_addr);
	error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
	error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
	error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
	error |= driver_create_file(&drv->driver, &driver_attr_wqm_quanta);
	/* NOTE(review): OR-ing negative errnos produces a meaningless
	 * composite value when more than one create fails; harmless while
	 * only "< 0" is checked, but the value itself must not be decoded. */
	return error;
}

/* Remove every driver-level sysfs attribute created by
 * nes_create_driver_sysfs(); mirror image of the create list above. */
static void nes_remove_driver_sysfs(struct pci_driver *drv)
{
	driver_remove_file(&drv->driver, &driver_attr_adapter);
	driver_remove_file(&drv->driver, &driver_attr_eeprom_cmd);
	driver_remove_file(&drv->driver, &driver_attr_eeprom_data);
	driver_remove_file(&drv->driver, &driver_attr_flash_cmd);
	driver_remove_file(&drv->driver, &driver_attr_flash_data);
	driver_remove_file(&drv->driver, &driver_attr_nonidx_addr);
	driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
	driver_remove_file(&drv->driver, &driver_attr_idx_addr);
	driver_remove_file(&drv->driver, &driver_attr_idx_data);
	driver_remove_file(&drv->driver, &driver_attr_wqm_quanta);
}

/**
 * nes_init_module - module initialization entry point
 *
 * Starts the iWARP connection manager, registers the PCI driver and
 * creates the driver-level sysfs files.  A sysfs creation failure is
 * only logged, not treated as fatal.
 */
static int __init nes_init_module(void)
{
	int retval;
	int retval1;

	retval = nes_cm_start();
	if (retval) {
		printk(KERN_ERR PFX "Unable to start NetEffect iWARP CM.\n");
		return retval;
	}
	retval = pci_register_driver(&nes_pci_driver);
	if (retval >= 0) {
		retval1 = nes_create_driver_sysfs(&nes_pci_driver);
		if (retval1 < 0)
			printk(KERN_ERR PFX "Unable to create NetEffect sys files.\n");
	}
	/* NOTE(review): if pci_register_driver() fails, the connection
	 * manager started by nes_cm_start() above is never stopped --
	 * nes_cm_stop() is not called on this error path.  Confirm and
	 * fix separately. */
	return retval;
}

/**
 * nes_exit_module - module unload entry point
 *
 * Stops the CM, removes the sysfs files and unregisters the PCI driver.
 */
static void __exit nes_exit_module(void)
{
	nes_cm_stop();
	nes_remove_driver_sysfs(&nes_pci_driver);
	pci_unregister_driver(&nes_pci_driver);
}

module_init(nes_init_module);
module_exit(nes_exit_module);
gpl-2.0
Split-Screen/android_kernel_xiaomi_armani
drivers/w1/masters/ds1wm.c
7808
15717
/* * 1-wire busmaster driver for DS1WM and ASICs with embedded DS1WMs * such as HP iPAQs (including h5xxx, h2200, and devices with ASIC3 * like hx4700). * * Copyright (c) 2004-2005, Szabolcs Gyurko <szabolcs.gyurko@tlt.hu> * Copyright (c) 2004-2007, Matt Reimer <mreimer@vpop.net> * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/mfd/core.h> #include <linux/mfd/ds1wm.h> #include <linux/slab.h> #include <asm/io.h> #include "../w1.h" #include "../w1_int.h" #define DS1WM_CMD 0x00 /* R/W 4 bits command */ #define DS1WM_DATA 0x01 /* R/W 8 bits, transmit/receive buffer */ #define DS1WM_INT 0x02 /* R/W interrupt status */ #define DS1WM_INT_EN 0x03 /* R/W interrupt enable */ #define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */ #define DS1WM_CNTRL 0x05 /* R/W master control register (not used yet) */ #define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */ #define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */ #define DS1WM_CMD_DQ_OUTPUT (1 << 2) /* write only - forces bus low */ #define DS1WM_CMD_DQ_INPUT (1 << 3) /* read only - reflects state of bus */ #define DS1WM_CMD_RST (1 << 5) /* software reset */ #define DS1WM_CMD_OD (1 << 7) /* overdrive */ #define DS1WM_INT_PD (1 << 0) /* presence detect */ #define DS1WM_INT_PDR (1 << 1) /* presence detect result */ #define DS1WM_INT_TBE (1 << 2) /* tx buffer empty */ #define DS1WM_INT_TSRE (1 << 3) /* tx shift register empty */ #define DS1WM_INT_RBF (1 << 4) /* rx buffer full */ #define DS1WM_INT_RSRF (1 << 5) /* rx shift register full */ #define DS1WM_INTEN_EPD (1 << 0) /* enable presence detect int */ #define DS1WM_INTEN_IAS (1 << 1) /* INTR active state */ #define DS1WM_INTEN_ETBE (1 << 
2) /* enable tx buffer empty int */ #define DS1WM_INTEN_ETMT (1 << 3) /* enable tx shift register empty int */ #define DS1WM_INTEN_ERBF (1 << 4) /* enable rx buffer full int */ #define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */ #define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */ #define DS1WM_INTEN_NOT_IAS (~DS1WM_INTEN_IAS) /* all but INTR active state */ #define DS1WM_TIMEOUT (HZ * 5) static struct { unsigned long freq; unsigned long divisor; } freq[] = { { 1000000, 0x80 }, { 2000000, 0x84 }, { 3000000, 0x81 }, { 4000000, 0x88 }, { 5000000, 0x82 }, { 6000000, 0x85 }, { 7000000, 0x83 }, { 8000000, 0x8c }, { 10000000, 0x86 }, { 12000000, 0x89 }, { 14000000, 0x87 }, { 16000000, 0x90 }, { 20000000, 0x8a }, { 24000000, 0x8d }, { 28000000, 0x8b }, { 32000000, 0x94 }, { 40000000, 0x8e }, { 48000000, 0x91 }, { 56000000, 0x8f }, { 64000000, 0x98 }, { 80000000, 0x92 }, { 96000000, 0x95 }, { 112000000, 0x93 }, { 128000000, 0x9c }, /* you can continue this table, consult the OPERATION - CLOCK DIVISOR section of the ds1wm spec sheet. 
*/ }; struct ds1wm_data { void __iomem *map; int bus_shift; /* # of shifts to calc register offsets */ struct platform_device *pdev; const struct mfd_cell *cell; int irq; int slave_present; void *reset_complete; void *read_complete; void *write_complete; int read_error; /* last byte received */ u8 read_byte; /* byte to write that makes all intr disabled, */ /* considering active_state (IAS) (optimization) */ u8 int_en_reg_none; unsigned int reset_recover_delay; /* see ds1wm.h */ }; static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, u8 val) { __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); } static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) { return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); } static irqreturn_t ds1wm_isr(int isr, void *data) { struct ds1wm_data *ds1wm_data = data; u8 intr; u8 inten = ds1wm_read_register(ds1wm_data, DS1WM_INT_EN); /* if no bits are set in int enable register (except the IAS) than go no further, reading the regs below has side effects */ if (!(inten & DS1WM_INTEN_NOT_IAS)) return IRQ_NONE; ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, ds1wm_data->int_en_reg_none); /* this read action clears the INTR and certain flags in ds1wm */ intr = ds1wm_read_register(ds1wm_data, DS1WM_INT); ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 
0 : 1; if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) { inten &= ~DS1WM_INTEN_ETMT; complete(ds1wm_data->write_complete); } if (intr & DS1WM_INT_RBF) { /* this read clears the RBF flag */ ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data, DS1WM_DATA); inten &= ~DS1WM_INTEN_ERBF; if (ds1wm_data->read_complete) complete(ds1wm_data->read_complete); } if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) { inten &= ~DS1WM_INTEN_EPD; complete(ds1wm_data->reset_complete); } ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, inten); return IRQ_HANDLED; } static int ds1wm_reset(struct ds1wm_data *ds1wm_data) { unsigned long timeleft; DECLARE_COMPLETION_ONSTACK(reset_done); ds1wm_data->reset_complete = &reset_done; /* enable Presence detect only */ ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD | ds1wm_data->int_en_reg_none); ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET); timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); ds1wm_data->reset_complete = NULL; if (!timeleft) { dev_err(&ds1wm_data->pdev->dev, "reset failed, timed out\n"); return 1; } if (!ds1wm_data->slave_present) { dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); return 1; } if (ds1wm_data->reset_recover_delay) msleep(ds1wm_data->reset_recover_delay); return 0; } static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) { unsigned long timeleft; DECLARE_COMPLETION_ONSTACK(write_done); ds1wm_data->write_complete = &write_done; ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, ds1wm_data->int_en_reg_none | DS1WM_INTEN_ETMT); ds1wm_write_register(ds1wm_data, DS1WM_DATA, data); timeleft = wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT); ds1wm_data->write_complete = NULL; if (!timeleft) { dev_err(&ds1wm_data->pdev->dev, "write failed, timed out\n"); return -ETIMEDOUT; } return 0; } static u8 ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data) { unsigned long timeleft; u8 intEnable = DS1WM_INTEN_ERBF | 
ds1wm_data->int_en_reg_none; DECLARE_COMPLETION_ONSTACK(read_done); ds1wm_read_register(ds1wm_data, DS1WM_DATA); ds1wm_data->read_complete = &read_done; ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, intEnable); ds1wm_write_register(ds1wm_data, DS1WM_DATA, write_data); timeleft = wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT); ds1wm_data->read_complete = NULL; if (!timeleft) { dev_err(&ds1wm_data->pdev->dev, "read failed, timed out\n"); ds1wm_data->read_error = -ETIMEDOUT; return 0xFF; } ds1wm_data->read_error = 0; return ds1wm_data->read_byte; } static int ds1wm_find_divisor(int gclk) { int i; for (i = ARRAY_SIZE(freq)-1; i >= 0; --i) if (gclk >= freq[i].freq) return freq[i].divisor; return 0; } static void ds1wm_up(struct ds1wm_data *ds1wm_data) { int divisor; struct ds1wm_driver_data *plat = ds1wm_data->pdev->dev.platform_data; if (ds1wm_data->cell->enable) ds1wm_data->cell->enable(ds1wm_data->pdev); divisor = ds1wm_find_divisor(plat->clock_rate); dev_dbg(&ds1wm_data->pdev->dev, "found divisor 0x%x for clock %d\n", divisor, plat->clock_rate); if (divisor == 0) { dev_err(&ds1wm_data->pdev->dev, "no suitable divisor for %dHz clock\n", plat->clock_rate); return; } ds1wm_write_register(ds1wm_data, DS1WM_CLKDIV, divisor); /* Let the w1 clock stabilize. */ msleep(1); ds1wm_reset(ds1wm_data); } static void ds1wm_down(struct ds1wm_data *ds1wm_data) { ds1wm_reset(ds1wm_data); /* Disable interrupts. 
*/ ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, ds1wm_data->int_en_reg_none); if (ds1wm_data->cell->disable) ds1wm_data->cell->disable(ds1wm_data->pdev); } /* --------------------------------------------------------------------- */ /* w1 methods */ static u8 ds1wm_read_byte(void *data) { struct ds1wm_data *ds1wm_data = data; return ds1wm_read(ds1wm_data, 0xff); } static void ds1wm_write_byte(void *data, u8 byte) { struct ds1wm_data *ds1wm_data = data; ds1wm_write(ds1wm_data, byte); } static u8 ds1wm_reset_bus(void *data) { struct ds1wm_data *ds1wm_data = data; ds1wm_reset(ds1wm_data); return 0; } static void ds1wm_search(void *data, struct w1_master *master_dev, u8 search_type, w1_slave_found_callback slave_found) { struct ds1wm_data *ds1wm_data = data; int i; int ms_discrep_bit = -1; u64 r = 0; /* holds the progress of the search */ u64 r_prime, d; unsigned slaves_found = 0; unsigned int pass = 0; dev_dbg(&ds1wm_data->pdev->dev, "search begin\n"); while (true) { ++pass; if (pass > 100) { dev_dbg(&ds1wm_data->pdev->dev, "too many attempts (100), search aborted\n"); return; } if (ds1wm_reset(ds1wm_data)) { dev_dbg(&ds1wm_data->pdev->dev, "pass: %d reset error (or no slaves)\n", pass); break; } dev_dbg(&ds1wm_data->pdev->dev, "pass: %d r : %0#18llx writing SEARCH_ROM\n", pass, r); ds1wm_write(ds1wm_data, search_type); dev_dbg(&ds1wm_data->pdev->dev, "pass: %d entering ASM\n", pass); ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA); dev_dbg(&ds1wm_data->pdev->dev, "pass: %d begining nibble loop\n", pass); r_prime = 0; d = 0; /* we work one nibble at a time */ /* each nibble is interleaved to form a byte */ for (i = 0; i < 16; i++) { unsigned char resp, _r, _r_prime, _d; _r = (r >> (4*i)) & 0xf; _r = ((_r & 0x1) << 1) | ((_r & 0x2) << 2) | ((_r & 0x4) << 3) | ((_r & 0x8) << 4); /* writes _r, then reads back: */ resp = ds1wm_read(ds1wm_data, _r); if (ds1wm_data->read_error) { dev_err(&ds1wm_data->pdev->dev, "pass: %d nibble: %d read error\n", pass, i); 
break; } _r_prime = ((resp & 0x02) >> 1) | ((resp & 0x08) >> 2) | ((resp & 0x20) >> 3) | ((resp & 0x80) >> 4); _d = ((resp & 0x01) >> 0) | ((resp & 0x04) >> 1) | ((resp & 0x10) >> 2) | ((resp & 0x40) >> 3); r_prime |= (unsigned long long) _r_prime << (i * 4); d |= (unsigned long long) _d << (i * 4); } if (ds1wm_data->read_error) { dev_err(&ds1wm_data->pdev->dev, "pass: %d read error, retrying\n", pass); break; } dev_dbg(&ds1wm_data->pdev->dev, "pass: %d r\': %0#18llx d:%0#18llx\n", pass, r_prime, d); dev_dbg(&ds1wm_data->pdev->dev, "pass: %d nibble loop complete, exiting ASM\n", pass); ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA); dev_dbg(&ds1wm_data->pdev->dev, "pass: %d resetting bus\n", pass); ds1wm_reset(ds1wm_data); if ((r_prime & ((u64)1 << 63)) && (d & ((u64)1 << 63))) { dev_err(&ds1wm_data->pdev->dev, "pass: %d bus error, retrying\n", pass); continue; /* start over */ } dev_dbg(&ds1wm_data->pdev->dev, "pass: %d found %0#18llx\n", pass, r_prime); slave_found(master_dev, r_prime); ++slaves_found; dev_dbg(&ds1wm_data->pdev->dev, "pass: %d complete, preparing next pass\n", pass); /* any discrepency found which we already choose the '1' branch is now is now irrelevant we reveal the next branch with this: */ d &= ~r; /* find last bit set, i.e. the most signif. 
bit set */
		ms_discrep_bit = fls64(d) - 1;
		dev_dbg(&ds1wm_data->pdev->dev,
			"pass: %d new d:%0#18llx MS discrep bit:%d\n",
			pass, d, ms_discrep_bit);
		/* prev_ms_discrep_bit = ms_discrep_bit;
		   prepare for next ROM search: */
		if (ms_discrep_bit == -1)
			break;
		/* NOTE(review): "1 << ms_discrep_bit" shifts a plain int.
		 * ROM ids are 64-bit and fls64() can return up to 64, so
		 * for ms_discrep_bit >= 31 this shift is undefined
		 * behavior and corrupts r.  Should be
		 * "1ULL << ms_discrep_bit" -- confirm and fix separately. */
		r = (r & ~(~0ull << (ms_discrep_bit))) | 1 << ms_discrep_bit;
	} /* end while true */
	dev_dbg(&ds1wm_data->pdev->dev,
		"pass: %d total: %d search done ms d bit pos: %d\n",
		pass, slaves_found, ms_discrep_bit);
}

/* --------------------------------------------------------------------- */

/* w1 bus-master operations exported to the w1 core. */
static struct w1_bus_master ds1wm_master = {
	.read_byte = ds1wm_read_byte,
	.write_byte = ds1wm_write_byte,
	.reset_bus = ds1wm_reset_bus,
	.search = ds1wm_search,
};

/*
 * Probe: map the register window, derive the register stride from the
 * resource size, fetch the MFD cell and platform data, hook the IRQ,
 * bring the block up and register with the w1 core.  Error paths
 * unwind via the err0/err1/err2 labels further down.
 */
static int ds1wm_probe(struct platform_device *pdev)
{
	struct ds1wm_data *ds1wm_data;
	struct ds1wm_driver_data *plat;
	struct resource *res;
	int ret;

	if (!pdev)
		return -ENODEV;
	ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
	if (!ds1wm_data)
		return -ENOMEM;
	platform_set_drvdata(pdev, ds1wm_data);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENXIO;
		goto err0;
	}
	ds1wm_data->map = ioremap(res->start, resource_size(res));
	if (!ds1wm_data->map) {
		ret = -ENOMEM;
		goto err0;
	}
	/* calculate bus shift from mem resource */
	ds1wm_data->bus_shift = resource_size(res) >> 3;
	ds1wm_data->pdev = pdev;
	ds1wm_data->cell = mfd_get_cell(pdev);
	if (!ds1wm_data->cell) {
		ret = -ENODEV;
		goto err1;
	}
	plat = pdev->dev.platform_data;
	if (!plat) {
		ret = -ENODEV;
		goto err1;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		ret = -ENXIO;
		goto err1;
	}
	ds1wm_data->irq = res->start;
	ds1wm_data->int_en_reg_none = (plat->active_high ?
DS1WM_INTEN_IAS : 0); ds1wm_data->reset_recover_delay = plat->reset_recover_delay; if (res->flags & IORESOURCE_IRQ_HIGHEDGE) irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); if (res->flags & IORESOURCE_IRQ_LOWEDGE) irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data); if (ret) goto err1; ds1wm_up(ds1wm_data); ds1wm_master.data = (void *)ds1wm_data; ret = w1_add_master_device(&ds1wm_master); if (ret) goto err2; return 0; err2: ds1wm_down(ds1wm_data); free_irq(ds1wm_data->irq, ds1wm_data); err1: iounmap(ds1wm_data->map); err0: kfree(ds1wm_data); return ret; } #ifdef CONFIG_PM static int ds1wm_suspend(struct platform_device *pdev, pm_message_t state) { struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev); ds1wm_down(ds1wm_data); return 0; } static int ds1wm_resume(struct platform_device *pdev) { struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev); ds1wm_up(ds1wm_data); return 0; } #else #define ds1wm_suspend NULL #define ds1wm_resume NULL #endif static int ds1wm_remove(struct platform_device *pdev) { struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev); w1_remove_master_device(&ds1wm_master); ds1wm_down(ds1wm_data); free_irq(ds1wm_data->irq, ds1wm_data); iounmap(ds1wm_data->map); kfree(ds1wm_data); return 0; } static struct platform_driver ds1wm_driver = { .driver = { .name = "ds1wm", }, .probe = ds1wm_probe, .remove = ds1wm_remove, .suspend = ds1wm_suspend, .resume = ds1wm_resume }; static int __init ds1wm_init(void) { printk("DS1WM w1 busmaster driver - (c) 2004 Szabolcs Gyurko\n"); return platform_driver_register(&ds1wm_driver); } static void __exit ds1wm_exit(void) { platform_driver_unregister(&ds1wm_driver); } module_init(ds1wm_init); module_exit(ds1wm_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, " "Matt Reimer <mreimer@vpop.net>," "Jean-Francois Dagenais <dagenaisj@sonatest.com>"); 
MODULE_DESCRIPTION("DS1WM w1 busmaster driver");
gpl-2.0
AshleyLai/testing1
drivers/net/wireless/rtlwifi/rtl8192se/table.c
9600
13995
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * * Created on 2010/ 5/18, 1:41 *****************************************************************************/ #include "table.h" u32 rtl8192sephy_reg_2t2rarray[PHY_REG_2T2RARRAYLENGTH] = { 0x01c, 0x07000000, 0x800, 0x00040000, 0x804, 0x00008003, 0x808, 0x0000fc00, 0x80c, 0x0000000a, 0x810, 0x10005088, 0x814, 0x020c3d10, 0x818, 0x00200185, 0x81c, 0x00000000, 0x820, 0x01000000, 0x824, 0x00390004, 0x828, 0x01000000, 0x82c, 0x00390004, 0x830, 0x00000004, 0x834, 0x00690200, 0x838, 0x00000004, 0x83c, 0x00690200, 0x840, 0x00010000, 0x844, 0x00010000, 0x848, 0x00000000, 0x84c, 0x00000000, 0x850, 0x00000000, 0x854, 0x00000000, 0x858, 0x48484848, 0x85c, 0x65a965a9, 0x860, 0x0f7f0130, 0x864, 0x0f7f0130, 0x868, 0x0f7f0130, 0x86c, 0x0f7f0130, 0x870, 0x03000700, 0x874, 0x03000300, 0x878, 0x00020002, 0x87c, 0x004f0201, 0x880, 0xa8300ac1, 0x884, 0x00000058, 0x888, 0x00000008, 0x88c, 0x00000004, 0x890, 0x00000000, 0x894, 0xfffffffe, 0x898, 0x40302010, 0x89c, 0x00706050, 0x8b0, 0x00000000, 0x8e0, 0x00000000, 0x8e4, 0x00000000, 0xe00, 0x30333333, 0xe04, 0x2a2d2e2f, 0xe08, 0x00003232, 0xe10, 0x30333333, 0xe14, 0x2a2d2e2f, 0xe18, 0x30333333, 0xe1c, 0x2a2d2e2f, 0xe30, 0x01007c00, 0xe34, 0x01004800, 0xe38, 0x1000dc1f, 0xe3c, 0x10008c1f, 0xe40, 0x021400a0, 0xe44, 0x281600a0, 0xe48, 0xf8000001, 0xe4c, 0x00002910, 0xe50, 0x01007c00, 0xe54, 0x01004800, 0xe58, 0x1000dc1f, 0xe5c, 0x10008c1f, 0xe60, 0x021400a0, 0xe64, 0x281600a0, 0xe6c, 0x00002910, 0xe70, 0x31ed92fb, 0xe74, 0x361536fb, 0xe78, 0x361536fb, 0xe7c, 0x361536fb, 0xe80, 0x361536fb, 0xe84, 0x000d92fb, 0xe88, 0x000d92fb, 0xe8c, 0x31ed92fb, 0xed0, 0x31ed92fb, 0xed4, 0x31ed92fb, 0xed8, 0x000d92fb, 0xedc, 0x000d92fb, 0xee0, 0x000d92fb, 0xee4, 0x015e5448, 0xee8, 0x21555448, 0x900, 0x00000000, 0x904, 0x00000023, 0x908, 0x00000000, 0x90c, 0x01121313, 0xa00, 0x00d047c8, 0xa04, 0x80ff0008, 0xa08, 0x8ccd8300, 0xa0c, 0x2e62120f, 0xa10, 0x9500bb78, 0xa14, 0x11144028, 0xa18, 0x00881117, 0xa1c, 
0x89140f00, 0xa20, 0x1a1b0000, 0xa24, 0x090e1317, 0xa28, 0x00000204, 0xa2c, 0x10d30000, 0xc00, 0x40071d40, 0xc04, 0x00a05633, 0xc08, 0x000000e4, 0xc0c, 0x6c6c6c6c, 0xc10, 0x08800000, 0xc14, 0x40000100, 0xc18, 0x08000000, 0xc1c, 0x40000100, 0xc20, 0x08000000, 0xc24, 0x40000100, 0xc28, 0x08000000, 0xc2c, 0x40000100, 0xc30, 0x6de9ac44, 0xc34, 0x469652cf, 0xc38, 0x49795994, 0xc3c, 0x0a979764, 0xc40, 0x1f7c403f, 0xc44, 0x000100b7, 0xc48, 0xec020000, 0xc4c, 0x007f037f, 0xc50, 0x69543420, 0xc54, 0x433c0094, 0xc58, 0x69543420, 0xc5c, 0x433c0094, 0xc60, 0x69543420, 0xc64, 0x433c0094, 0xc68, 0x69543420, 0xc6c, 0x433c0094, 0xc70, 0x2c7f000d, 0xc74, 0x0186155b, 0xc78, 0x0000001f, 0xc7c, 0x00b91612, 0xc80, 0x40000100, 0xc84, 0x20f60000, 0xc88, 0x20000080, 0xc8c, 0x20200000, 0xc90, 0x40000100, 0xc94, 0x00000000, 0xc98, 0x40000100, 0xc9c, 0x00000000, 0xca0, 0x00492492, 0xca4, 0x00000000, 0xca8, 0x00000000, 0xcac, 0x00000000, 0xcb0, 0x00000000, 0xcb4, 0x00000000, 0xcb8, 0x00000000, 0xcbc, 0x28000000, 0xcc0, 0x00000000, 0xcc4, 0x00000000, 0xcc8, 0x00000000, 0xccc, 0x00000000, 0xcd0, 0x00000000, 0xcd4, 0x00000000, 0xcd8, 0x64b22427, 0xcdc, 0x00766932, 0xce0, 0x00222222, 0xce4, 0x00000000, 0xce8, 0x37644302, 0xcec, 0x2f97d40c, 0xd00, 0x00000750, 0xd04, 0x00000403, 0xd08, 0x0000907f, 0xd0c, 0x00000001, 0xd10, 0xa0633333, 0xd14, 0x33333c63, 0xd18, 0x6a8f5b6b, 0xd1c, 0x00000000, 0xd20, 0x00000000, 0xd24, 0x00000000, 0xd28, 0x00000000, 0xd2c, 0xcc979975, 0xd30, 0x00000000, 0xd34, 0x00000000, 0xd38, 0x00000000, 0xd3c, 0x00027293, 0xd40, 0x00000000, 0xd44, 0x00000000, 0xd48, 0x00000000, 0xd50, 0x6437140a, 0xd54, 0x024dbd02, 0xd58, 0x00000000, 0xd5c, 0x30032064, 0xd60, 0x4653de68, 0xd64, 0x00518a3c, 0xd68, 0x00002101, 0xf14, 0x00000003, 0xf4c, 0x00000000, 0xf00, 0x00000300, }; u32 rtl8192sephy_changeto_1t1rarray[PHY_CHANGETO_1T1RARRAYLENGTH] = { 0x844, 0xffffffff, 0x00010000, 0x804, 0x0000000f, 0x00000001, 0x824, 0x00f0000f, 0x00300004, 0x82c, 0x00f0000f, 0x00100002, 0x870, 0x04000000, 
0x00000001, 0x864, 0x00000400, 0x00000000, 0x878, 0x000f000f, 0x00000002, 0xe74, 0x0f000000, 0x00000002, 0xe78, 0x0f000000, 0x00000002, 0xe7c, 0x0f000000, 0x00000002, 0xe80, 0x0f000000, 0x00000002, 0x90c, 0x000000ff, 0x00000011, 0xc04, 0x000000ff, 0x00000011, 0xd04, 0x0000000f, 0x00000001, 0x1f4, 0xffff0000, 0x00007777, 0x234, 0xf8000000, 0x0000000a, }; u32 rtl8192sephy_changeto_1t2rarray[PHY_CHANGETO_1T2RARRAYLENGTH] = { 0x804, 0x0000000f, 0x00000003, 0x824, 0x00f0000f, 0x00300004, 0x82c, 0x00f0000f, 0x00300002, 0x870, 0x04000000, 0x00000001, 0x864, 0x00000400, 0x00000000, 0x878, 0x000f000f, 0x00000002, 0xe74, 0x0f000000, 0x00000002, 0xe78, 0x0f000000, 0x00000002, 0xe7c, 0x0f000000, 0x00000002, 0xe80, 0x0f000000, 0x00000002, 0x90c, 0x000000ff, 0x00000011, 0xc04, 0x000000ff, 0x00000033, 0xd04, 0x0000000f, 0x00000003, 0x1f4, 0xffff0000, 0x00007777, 0x234, 0xf8000000, 0x0000000a, }; u32 rtl8192sephy_reg_array_pg[PHY_REG_ARRAY_PGLENGTH] = { 0xe00, 0xffffffff, 0x06090909, 0xe04, 0xffffffff, 0x00030406, 0xe08, 0x0000ff00, 0x00000000, 0xe10, 0xffffffff, 0x0a0c0d0e, 0xe14, 0xffffffff, 0x04070809, 0xe18, 0xffffffff, 0x0a0c0d0e, 0xe1c, 0xffffffff, 0x04070809, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0xe10, 0xffffffff, 0x02040404, 0xe14, 0xffffffff, 0x00000002, 0xe18, 0xffffffff, 0x02040404, 0xe1c, 0xffffffff, 0x00000002, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0xe10, 0xffffffff, 0x02040404, 0xe14, 0xffffffff, 0x00000002, 0xe18, 0xffffffff, 0x02040404, 0xe1c, 0xffffffff, 0x00000002, 0xe00, 0xffffffff, 0x02020202, 0xe04, 0xffffffff, 0x00020202, 0xe08, 0x0000ff00, 0x00000000, 0xe10, 0xffffffff, 0x02020202, 0xe14, 0xffffffff, 0x00000002, 0xe18, 0xffffffff, 0x02020202, 0xe1c, 0xffffffff, 0x00000002, }; u32 rtl8192seradioa_1t_array[RADIOA_1T_ARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00030250, 0x002, 0x00010000, 0x010, 0x0008000f, 0x011, 0x000231fc, 0x010, 0x000c000f, 0x011, 
0x0003f9f8, 0x010, 0x0002000f, 0x011, 0x00020101, 0x014, 0x0001093e, 0x014, 0x0009093e, 0x015, 0x0000f8f4, 0x017, 0x000f6500, 0x01a, 0x00013056, 0x01b, 0x00060000, 0x01c, 0x00000300, 0x01e, 0x00031059, 0x021, 0x00054000, 0x022, 0x0000083c, 0x023, 0x00001558, 0x024, 0x00000060, 0x025, 0x00022583, 0x026, 0x0000f200, 0x027, 0x000eacf1, 0x028, 0x0009bd54, 0x029, 0x00004582, 0x02a, 0x00000001, 0x02b, 0x00021334, 0x02a, 0x00000000, 0x02b, 0x0000000a, 0x02a, 0x00000001, 0x02b, 0x00000808, 0x02b, 0x00053333, 0x02c, 0x0000000c, 0x02a, 0x00000002, 0x02b, 0x00000808, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x00000003, 0x02b, 0x00000808, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000004, 0x02b, 0x00000808, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x00000005, 0x02b, 0x00000709, 0x02b, 0x00053333, 0x02c, 0x0000000d, 0x02a, 0x00000006, 0x02b, 0x00000709, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x00000007, 0x02b, 0x00000709, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000008, 0x02b, 0x00000709, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x00000009, 0x02b, 0x0000060a, 0x02b, 0x00053333, 0x02c, 0x0000000d, 0x02a, 0x0000000a, 0x02b, 0x0000060a, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x0000000b, 0x02b, 0x0000060a, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x0000000c, 0x02b, 0x0000060a, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x0000000d, 0x02b, 0x0000050b, 0x02b, 0x00053333, 0x02c, 0x0000000d, 0x02a, 0x0000000e, 0x02b, 0x0000050b, 0x02b, 0x00066623, 0x02c, 0x0000001a, 0x02a, 0x000e4000, 0x030, 0x00020000, 0x031, 0x000b9631, 0x032, 0x0000130d, 0x033, 0x00000187, 0x013, 0x00019e6c, 0x013, 0x00015e94, 0x000, 0x00010159, 0x018, 0x0000f401, 0x0fe, 0x00000000, 0x01e, 0x0003105b, 0x0fe, 0x00000000, 0x000, 0x00030159, 0x010, 0x0004000f, 0x011, 0x000203f9, }; u32 rtl8192seradiob_array[RADIOB_ARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00001041, 0x002, 0x00011000, 0x005, 0x00080fc0, 0x007, 0x000fc803, 0x013, 0x00017cb0, 0x013, 0x00011cc0, 0x013, 
0x0000dc60, 0x013, 0x00008c60, 0x013, 0x00004450, 0x013, 0x00000020, }; u32 rtl8192seradiob_gm_array[RADIOB_GM_ARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00001041, 0x002, 0x00011000, 0x005, 0x00080fc0, 0x007, 0x000fc803, }; u32 rtl8192semac_2t_array[MAC_2T_ARRAYLENGTH] = { 0x020, 0x00000035, 0x048, 0x0000000e, 0x049, 0x000000f0, 0x04a, 0x00000077, 0x04b, 0x00000083, 0x0b5, 0x00000021, 0x0dc, 0x000000ff, 0x0dd, 0x000000ff, 0x0de, 0x000000ff, 0x0df, 0x000000ff, 0x116, 0x00000000, 0x117, 0x00000000, 0x118, 0x00000000, 0x119, 0x00000000, 0x11a, 0x00000000, 0x11b, 0x00000000, 0x11c, 0x00000000, 0x11d, 0x00000000, 0x160, 0x0000000b, 0x161, 0x0000000b, 0x162, 0x0000000b, 0x163, 0x0000000b, 0x164, 0x0000000b, 0x165, 0x0000000b, 0x166, 0x0000000b, 0x167, 0x0000000b, 0x168, 0x0000000b, 0x169, 0x0000000b, 0x16a, 0x0000000b, 0x16b, 0x0000000b, 0x16c, 0x0000000b, 0x16d, 0x0000000b, 0x16e, 0x0000000b, 0x16f, 0x0000000b, 0x170, 0x0000000b, 0x171, 0x0000000b, 0x172, 0x0000000b, 0x173, 0x0000000b, 0x174, 0x0000000b, 0x175, 0x0000000b, 0x176, 0x0000000b, 0x177, 0x0000000b, 0x178, 0x0000000b, 0x179, 0x0000000b, 0x17a, 0x0000000b, 0x17b, 0x0000000b, 0x17c, 0x0000000b, 0x17d, 0x0000000b, 0x17e, 0x0000000b, 0x17f, 0x0000000b, 0x236, 0x0000000c, 0x503, 0x00000022, 0x560, 0x00000000, }; u32 rtl8192seagctab_array[AGCTAB_ARRAYLENGTH] = { 0xc78, 0x7f000001, 0xc78, 0x7f010001, 0xc78, 0x7e020001, 0xc78, 0x7d030001, 0xc78, 0x7c040001, 0xc78, 0x7b050001, 0xc78, 0x7a060001, 0xc78, 0x79070001, 0xc78, 0x78080001, 0xc78, 0x77090001, 0xc78, 0x760a0001, 0xc78, 0x750b0001, 0xc78, 0x740c0001, 0xc78, 0x730d0001, 0xc78, 0x720e0001, 0xc78, 0x710f0001, 0xc78, 0x70100001, 0xc78, 0x6f110001, 0xc78, 0x6f120001, 0xc78, 0x6e130001, 0xc78, 0x6d140001, 0xc78, 0x6d150001, 0xc78, 0x6c160001, 0xc78, 0x6b170001, 0xc78, 0x6a180001, 0xc78, 0x6a190001, 0xc78, 0x691a0001, 0xc78, 0x681b0001, 0xc78, 0x671c0001, 0xc78, 0x661d0001, 0xc78, 0x651e0001, 0xc78, 0x641f0001, 0xc78, 0x63200001, 0xc78, 0x4c210001, 0xc78, 
0x4b220001, 0xc78, 0x4a230001, 0xc78, 0x49240001, 0xc78, 0x48250001, 0xc78, 0x47260001, 0xc78, 0x46270001, 0xc78, 0x45280001, 0xc78, 0x44290001, 0xc78, 0x2c2a0001, 0xc78, 0x2b2b0001, 0xc78, 0x2a2c0001, 0xc78, 0x292d0001, 0xc78, 0x282e0001, 0xc78, 0x272f0001, 0xc78, 0x26300001, 0xc78, 0x25310001, 0xc78, 0x24320001, 0xc78, 0x23330001, 0xc78, 0x22340001, 0xc78, 0x09350001, 0xc78, 0x08360001, 0xc78, 0x07370001, 0xc78, 0x06380001, 0xc78, 0x05390001, 0xc78, 0x043a0001, 0xc78, 0x033b0001, 0xc78, 0x023c0001, 0xc78, 0x013d0001, 0xc78, 0x003e0001, 0xc78, 0x003f0001, 0xc78, 0x7f400001, 0xc78, 0x7f410001, 0xc78, 0x7e420001, 0xc78, 0x7d430001, 0xc78, 0x7c440001, 0xc78, 0x7b450001, 0xc78, 0x7a460001, 0xc78, 0x79470001, 0xc78, 0x78480001, 0xc78, 0x77490001, 0xc78, 0x764a0001, 0xc78, 0x754b0001, 0xc78, 0x744c0001, 0xc78, 0x734d0001, 0xc78, 0x724e0001, 0xc78, 0x714f0001, 0xc78, 0x70500001, 0xc78, 0x6f510001, 0xc78, 0x6f520001, 0xc78, 0x6e530001, 0xc78, 0x6d540001, 0xc78, 0x6d550001, 0xc78, 0x6c560001, 0xc78, 0x6b570001, 0xc78, 0x6a580001, 0xc78, 0x6a590001, 0xc78, 0x695a0001, 0xc78, 0x685b0001, 0xc78, 0x675c0001, 0xc78, 0x665d0001, 0xc78, 0x655e0001, 0xc78, 0x645f0001, 0xc78, 0x63600001, 0xc78, 0x4c610001, 0xc78, 0x4b620001, 0xc78, 0x4a630001, 0xc78, 0x49640001, 0xc78, 0x48650001, 0xc78, 0x47660001, 0xc78, 0x46670001, 0xc78, 0x45680001, 0xc78, 0x44690001, 0xc78, 0x2c6a0001, 0xc78, 0x2b6b0001, 0xc78, 0x2a6c0001, 0xc78, 0x296d0001, 0xc78, 0x286e0001, 0xc78, 0x276f0001, 0xc78, 0x26700001, 0xc78, 0x25710001, 0xc78, 0x24720001, 0xc78, 0x23730001, 0xc78, 0x22740001, 0xc78, 0x09750001, 0xc78, 0x08760001, 0xc78, 0x07770001, 0xc78, 0x06780001, 0xc78, 0x05790001, 0xc78, 0x047a0001, 0xc78, 0x037b0001, 0xc78, 0x027c0001, 0xc78, 0x017d0001, 0xc78, 0x007e0001, 0xc78, 0x007f0001, 0xc78, 0x3000001e, 0xc78, 0x3001001e, 0xc78, 0x3002001e, 0xc78, 0x3003001e, 0xc78, 0x3004001e, 0xc78, 0x3405001e, 0xc78, 0x3806001e, 0xc78, 0x3e07001e, 0xc78, 0x3e08001e, 0xc78, 0x4409001e, 0xc78, 0x460a001e, 0xc78, 
0x480b001e, 0xc78, 0x480c001e, 0xc78, 0x4e0d001e, 0xc78, 0x560e001e, 0xc78, 0x5a0f001e, 0xc78, 0x5e10001e, 0xc78, 0x6211001e, 0xc78, 0x6c12001e, 0xc78, 0x7213001e, 0xc78, 0x7214001e, 0xc78, 0x7215001e, 0xc78, 0x7216001e, 0xc78, 0x7217001e, 0xc78, 0x7218001e, 0xc78, 0x7219001e, 0xc78, 0x721a001e, 0xc78, 0x721b001e, 0xc78, 0x721c001e, 0xc78, 0x721d001e, 0xc78, 0x721e001e, 0xc78, 0x721f001e, };
gpl-2.0
Smando87/smdk4412_kernel
kernel/power/suspend_test.c
9856
5091
/*
 * kernel/power/suspend_test.c - Suspend to RAM and standby test facility.
 *
 * Copyright (c) 2009 Pavel Machek <pavel@ucw.cz>
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/rtc.h>

#include "power.h"

/*
 * We test the system suspend code by setting an RTC wakealarm a short
 * time in the future, then suspending.  Suspending the devices won't
 * normally take long ... some systems only need a few milliseconds.
 *
 * The time it takes is system-specific though, so when we test this
 * during system bootup we allow a LOT of time.
 */
#define TEST_SUSPEND_SECONDS	10

/* jiffies value captured by suspend_test_start(), read back in _finish() */
static unsigned long suspend_test_start_time;

/*
 * Record the start of a timed suspend/resume phase.
 */
void suspend_test_start(void)
{
	/* FIXME Use better timebase than "jiffies", ideally a clocksource.
	 * What we want is a hardware counter that will work correctly even
	 * during the irqs-are-off stages of the suspend/resume cycle...
	 */
	suspend_test_start_time = jiffies;
}

/*
 * Report how long the phase started by suspend_test_start() took, and
 * WARN if it exceeded the RTC alarm period used by the test.
 *
 * @label: human-readable name of the phase, used in the log messages.
 */
void suspend_test_finish(const char *label)
{
	long nj = jiffies - suspend_test_start_time;
	unsigned msec;

	msec = jiffies_to_msecs(abs(nj));
	/* msec is unsigned, so print it with %u (was %d: a -Wformat
	 * type mismatch between specifier and argument).
	 */
	pr_info("PM: %s took %u.%03u seconds\n", label,
			msec / 1000, msec % 1000);

	/* Warning on suspend means the RTC alarm period needs to be
	 * larger -- the system was sooo slooowwww to suspend that the
	 * alarm (should have) fired before the system went to sleep!
	 *
	 * Warning on either suspend or resume also means the system
	 * has some performance issues. The stack dump of a WARN_ON
	 * is more likely to get the right attention than a printk...
	 */
	WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
	     "Component: %s, time: %u\n", label, msec);
}

/*
 * To test system suspend, we need a hands-off mechanism to resume the
 * system.  RTCs wake alarms are a common self-contained mechanism.
 */

/*
 * Arm @rtc to fire TEST_SUSPEND_SECONDS from now, enter @state via
 * pm_suspend(), and disarm the alarm again on return.  Errors are
 * logged but not propagated; this is a boot-time smoke test.
 */
static void __init test_wakealarm(struct rtc_device *rtc,
				  suspend_state_t state)
{
	static char err_readtime[] __initdata =
		KERN_ERR "PM: can't read %s time, err %d\n";
	static char err_wakealarm[] __initdata =
		KERN_ERR "PM: can't set %s wakealarm, err %d\n";
	static char err_suspend[] __initdata =
		KERN_ERR "PM: suspend test failed, error %d\n";
	static char info_test[] __initdata =
		KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";

	unsigned long now;
	struct rtc_wkalrm alm;
	/* Initialized so the trailing "if (status < 0)" never reads an
	 * indeterminate value if @state matches neither branch below
	 * (previously 'status' could be used uninitialized on that path).
	 */
	int status = 0;

	/* this may fail if the RTC hasn't been initialized */
	status = rtc_read_time(rtc, &alm.time);
	if (status < 0) {
		printk(err_readtime, dev_name(&rtc->dev), status);
		return;
	}
	rtc_tm_to_time(&alm.time, &now);

	memset(&alm, 0, sizeof alm);
	rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
	alm.enabled = true;

	status = rtc_set_alarm(rtc, &alm);
	if (status < 0) {
		printk(err_wakealarm, dev_name(&rtc->dev), status);
		return;
	}

	/* Try "mem" first; fall back to "standby" if the platform has no
	 * suspend-to-RAM support (pm_suspend returns -ENODEV).
	 */
	if (state == PM_SUSPEND_MEM) {
		printk(info_test, pm_states[state]);
		status = pm_suspend(state);
		if (status == -ENODEV)
			state = PM_SUSPEND_STANDBY;
	}
	if (state == PM_SUSPEND_STANDBY) {
		printk(info_test, pm_states[state]);
		status = pm_suspend(state);
	}
	if (status < 0)
		printk(err_suspend, status);

	/* Some platforms can't detect that the alarm triggered the
	 * wakeup, or (accordingly) disable it afterwards.  It's
	 * supposed to give oneshot behavior; cope.
	 */
	alm.enabled = false;
	rtc_set_alarm(rtc, &alm);
}

/*
 * class_find_device() match callback: accept an RTC that supports
 * alarms and whose parent may wake the system; on match, store its
 * device name through @name_ptr (a const char ** in disguise).
 */
static int __init has_wakealarm(struct device *dev, void *name_ptr)
{
	struct rtc_device *candidate = to_rtc_device(dev);

	if (!candidate->ops->set_alarm)
		return 0;
	if (!device_may_wakeup(candidate->dev.parent))
		return 0;

	*(const char **)name_ptr = dev_name(dev);
	return 1;
}

/*
 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
 * at startup time.  They're normally disabled, for faster boot and because
 * we can't know which states really work on this particular system.
 */
static suspend_state_t test_state __initdata = PM_SUSPEND_ON;

static char warn_bad_state[] __initdata =
	KERN_WARNING "PM: can't test '%s' suspend state\n";

/*
 * Parse "test_suspend=<state>"; on a match, record the state to test
 * in test_state.  Always returns 0 (unknown values only warn).
 */
static int __init setup_test_suspend(char *value)
{
	unsigned i;

	/* "=mem" ==> "mem" */
	value++;
	for (i = 0; i < PM_SUSPEND_MAX; i++) {
		if (!pm_states[i])
			continue;
		if (strcmp(pm_states[i], value) != 0)
			continue;
		test_state = (__force suspend_state_t) i;
		return 0;
	}
	printk(warn_bad_state, value);
	return 0;
}
__setup("test_suspend", setup_test_suspend);

/*
 * Late initcall: if a testable state was requested and a wakealarm-
 * capable RTC is available, run one suspend/resume cycle through it.
 */
static int __init test_suspend(void)
{
	static char warn_no_rtc[] __initdata =
		KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";

	char *pony = NULL;
	struct rtc_device *rtc = NULL;

	/* PM is initialized by now; is that state testable? */
	if (test_state == PM_SUSPEND_ON)
		goto done;
	if (!valid_state(test_state)) {
		printk(warn_bad_state, pm_states[test_state]);
		goto done;
	}

	/* RTCs have initialized by now too ... can we use one? */
	class_find_device(rtc_class, NULL, &pony, has_wakealarm);
	if (pony)
		rtc = rtc_class_open(pony);
	if (!rtc) {
		printk(warn_no_rtc);
		goto done;
	}

	/* go for it */
	test_wakealarm(rtc, test_state);
	rtc_class_close(rtc);
done:
	return 0;
}
late_initcall(test_suspend);
gpl-2.0
CyanideL/android_kernel_moto_shamu
drivers/zorro/zorro-sysfs.c
11648
3155
/*
 * File Attributes for Zorro Devices
 *
 * Copyright (C) 2003 Geert Uytterhoeven
 *
 * Loosely based on drivers/pci/pci-sysfs.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/zorro.h>
#include <linux/stat.h>
#include <linux/string.h>

#include "zorro.h"

/* show configuration fields */
/*
 * Generates a read-only sysfs attribute "name" whose show() method
 * prints the given zorro_dev field with the given printf format.
 * NOTE(review): the macro name "zorro_config_attr" is later reused for
 * the bin_attribute variable below — the macro is fully expanded before
 * that point, so there is no clash, but keep both names in sync.
 */
#define zorro_config_attr(name, field, format_string) \
static ssize_t \
show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
	struct zorro_dev *z; \
\
	z = to_zorro_dev(dev); \
	return sprintf(buf, format_string, z->field); \
} \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);

zorro_config_attr(id, id, "0x%08x\n");
zorro_config_attr(type, rom.er_Type, "0x%02x\n");
zorro_config_attr(serial, rom.er_SerialNumber, "0x%08x\n");
zorro_config_attr(slotaddr, slotaddr, "0x%04x\n");
zorro_config_attr(slotsize, slotsize, "0x%04x\n");

/*
 * "resource" attribute: prints start, end, and flags of the device's
 * resource, mirroring the PCI sysfs "resource" format.
 */
static ssize_t zorro_show_resource(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct zorro_dev *z = to_zorro_dev(dev);

	return sprintf(buf, "0x%08lx 0x%08lx 0x%08lx\n",
		       (unsigned long)zorro_resource_start(z),
		       (unsigned long)zorro_resource_end(z),
		       zorro_resource_flags(z));
}

static DEVICE_ATTR(resource, S_IRUGO, zorro_show_resource, NULL);

/*
 * Binary "config" attribute read(): synthesizes an AmigaOS-style
 * ConfigDev structure on the stack from the zorro_dev fields and copies
 * the requested window of it to userspace.
 */
static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
{
	struct zorro_dev *z = to_zorro_dev(container_of(kobj, struct device,
					   kobj));
	struct ConfigDev cd;

	/* Construct a ConfigDev */
	memset(&cd, 0, sizeof(cd));
	cd.cd_Rom = z->rom;
	cd.cd_SlotAddr = z->slotaddr;
	cd.cd_SlotSize = z->slotsize;
	cd.cd_BoardAddr = (void *)zorro_resource_start(z);
	cd.cd_BoardSize = zorro_resource_len(z);

	return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd));
}

static struct bin_attribute zorro_config_attr = {
	.attr =	{
		.name = "config",
		.mode = S_IRUGO,
	},
	.size = sizeof(struct ConfigDev),
	.read = zorro_read_config,
};

/*
 * "modalias" attribute: the string udev/modprobe match against,
 * built from the device id via ZORRO_DEVICE_MODALIAS_FMT.
 */
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct zorro_dev *z = to_zorro_dev(dev);

	return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
}

static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL);

/*
 * Create all sysfs attributes for one Zorro device.
 *
 * Returns 0 on success or the first device_create_file() /
 * sysfs_create_bin_file() error.  The (error = ...) || chain
 * short-circuits on the first failure; already-created files are not
 * removed here — presumably the caller tears the device down on
 * failure (TODO confirm against the caller in zorro.c).
 */
int zorro_create_sysfs_dev_files(struct zorro_dev *z)
{
	struct device *dev = &z->dev;
	int error;

	/* current configuration's attributes */
	if ((error = device_create_file(dev, &dev_attr_id)) ||
	    (error = device_create_file(dev, &dev_attr_type)) ||
	    (error = device_create_file(dev, &dev_attr_serial)) ||
	    (error = device_create_file(dev, &dev_attr_slotaddr)) ||
	    (error = device_create_file(dev, &dev_attr_slotsize)) ||
	    (error = device_create_file(dev, &dev_attr_resource)) ||
	    (error = device_create_file(dev, &dev_attr_modalias)) ||
	    (error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr)))
		return error;

	return 0;
}
gpl-2.0
ktd2004/linux-stable
drivers/staging/rtl8723au/os_dep/os_intfs.c
129
24415
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ #define _OS_INTFS_C_ #include <osdep_service.h> #include <drv_types.h> #include <xmit_osdep.h> #include <recv_osdep.h> #include <hal_intf.h> #include <rtw_version.h> #include <rtl8723a_hal.h> MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek Wireless Lan Driver"); MODULE_AUTHOR("Realtek Semiconductor Corp."); MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>"); MODULE_VERSION(DRIVERVERSION); MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin"); MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin"); MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin"); /* module param defaults */ static int rtw_chip_version = 0x00; static int rtw_rfintfs = HWPI; static int rtw_debug = 1; static int rtw_channel = 1;/* ad-hoc support requirement */ static int rtw_wireless_mode = WIRELESS_11BG_24N; static int rtw_vrtl_carrier_sense = AUTO_VCS; static int rtw_vcs_type = RTS_CTS;/* */ static int rtw_rts_thresh = 2347;/* */ static int rtw_frag_thresh = 2346;/* */ static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */ static int rtw_scan_mode = 1;/* active, passive */ static int rtw_adhoc_tx_pwr = 1; static int rtw_soft_ap; static int rtw_power_mgnt = 1; static int rtw_ips_mode = IPS_NORMAL; static int rtw_smart_ps = 2; module_param(rtw_ips_mode, int, 0644); 
MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode"); static int rtw_long_retry_lmt = 7; static int rtw_short_retry_lmt = 7; static int rtw_busy_thresh = 40; static int rtw_ack_policy = NORMAL_ACK; static int rtw_acm_method;/* 0:By SW 1:By HW. */ static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */ static int rtw_uapsd_enable; static int rtw_ht_enable = 1; /* 0 :diable, bit(0): enable 2.4g, bit(1): enable 5g */ static int rtw_cbw40_enable = 3; static int rtw_ampdu_enable = 1;/* for enable tx_ampdu */ /* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, default is set to enable * 2.4GHZ for IOT issue with bufflao's AP at 5GHZ */ static int rtw_rx_stbc = 1; static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */ /* Use 2 path Tx to transmit MCS0~7 and legacy mode */ static int rtw_lowrate_two_xmit = 1; /* int rf_config = RF_1T2R; 1T2R */ static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */ static int rtw_low_power; static int rtw_wifi_spec; static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX; #ifdef CONFIG_8723AU_BT_COEXIST static int rtw_btcoex_enable = 1; static int rtw_bt_iso = 2;/* 0:Low, 1:High, 2:From Efuse */ /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy, 5.OtherBusy */ static int rtw_bt_sco = 3; /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ static int rtw_bt_ampdu = 1; #endif /* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. 
*/ static int rtw_AcceptAddbaReq = true; static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */ static int rtw_antdiv_type; /* 0:decide by efuse */ static int rtw_enusbss;/* 0:disable, 1:enable */ static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */ static int rtw_hwpwrp_detect; /* HW power ping detect 0:disable , 1:enable */ static int rtw_hw_wps_pbc = 1; static int rtw_80211d; static int rtw_regulatory_id = 0xff;/* Regulatory tab id, 0xff = follow efuse's setting */ module_param(rtw_regulatory_id, int, 0644); static char *ifname = "wlan%d"; module_param(ifname, charp, 0644); MODULE_PARM_DESC(ifname, "The default name to allocate for first interface"); static char *if2name = "wlan%d"; module_param(if2name, charp, 0644); MODULE_PARM_DESC(if2name, "The default name to allocate for second interface"); module_param(rtw_channel_plan, int, 0644); module_param(rtw_chip_version, int, 0644); module_param(rtw_rfintfs, int, 0644); module_param(rtw_channel, int, 0644); module_param(rtw_wmm_enable, int, 0644); module_param(rtw_vrtl_carrier_sense, int, 0644); module_param(rtw_vcs_type, int, 0644); module_param(rtw_busy_thresh, int, 0644); module_param(rtw_ht_enable, int, 0644); module_param(rtw_cbw40_enable, int, 0644); module_param(rtw_ampdu_enable, int, 0644); module_param(rtw_rx_stbc, int, 0644); module_param(rtw_ampdu_amsdu, int, 0644); module_param(rtw_lowrate_two_xmit, int, 0644); module_param(rtw_rf_config, int, 0644); module_param(rtw_power_mgnt, int, 0644); module_param(rtw_smart_ps, int, 0644); module_param(rtw_low_power, int, 0644); module_param(rtw_wifi_spec, int, 0644); module_param(rtw_antdiv_cfg, int, 0644); module_param(rtw_enusbss, int, 0644); module_param(rtw_hwpdn_mode, int, 0644); module_param(rtw_hwpwrp_detect, int, 0644); module_param(rtw_hw_wps_pbc, int, 0644); static uint rtw_max_roaming_times = 2; module_param(rtw_max_roaming_times, uint, 0644); MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming times to 
try"); module_param(rtw_80211d, int, 0644); MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism"); #ifdef CONFIG_8723AU_BT_COEXIST module_param(rtw_btcoex_enable, int, 0644); MODULE_PARM_DESC(rtw_btcoex_enable, "Enable BT co-existence mechanism"); #endif static uint rtw_notch_filter; module_param(rtw_notch_filter, uint, 0644); MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P"); module_param_named(debug, rtw_debug, int, 0444); MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)"); static int netdev_close(struct net_device *pnetdev); static int loadparam(struct rtw_adapter *padapter, struct net_device *pnetdev) { struct registry_priv *registry_par = &padapter->registrypriv; GlobalDebugLevel23A = rtw_debug; registry_par->chip_version = (u8)rtw_chip_version; registry_par->rfintfs = (u8)rtw_rfintfs; memcpy(registry_par->ssid.ssid, "ANY", 3); registry_par->ssid.ssid_len = 3; registry_par->channel = (u8)rtw_channel; registry_par->wireless_mode = (u8)rtw_wireless_mode; registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense; registry_par->vcs_type = (u8)rtw_vcs_type; registry_par->rts_thresh = (u16)rtw_rts_thresh; registry_par->frag_thresh = (u16)rtw_frag_thresh; registry_par->preamble = (u8)rtw_preamble; registry_par->scan_mode = (u8)rtw_scan_mode; registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr; registry_par->soft_ap = (u8)rtw_soft_ap; registry_par->smart_ps = (u8)rtw_smart_ps; registry_par->power_mgnt = (u8)rtw_power_mgnt; registry_par->ips_mode = (u8)rtw_ips_mode; registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt; registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt; registry_par->busy_thresh = (u16)rtw_busy_thresh; registry_par->ack_policy = (u8)rtw_ack_policy; registry_par->acm_method = (u8)rtw_acm_method; /* UAPSD */ registry_par->wmm_enable = (u8)rtw_wmm_enable; registry_par->uapsd_enable = (u8)rtw_uapsd_enable; registry_par->ht_enable = (u8)rtw_ht_enable; registry_par->cbw40_enable = (u8)rtw_cbw40_enable; 
registry_par->ampdu_enable = (u8)rtw_ampdu_enable; registry_par->rx_stbc = (u8)rtw_rx_stbc; registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu; registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit; registry_par->rf_config = (u8)rtw_rf_config; registry_par->low_power = (u8)rtw_low_power; registry_par->wifi_spec = (u8)rtw_wifi_spec; registry_par->channel_plan = (u8)rtw_channel_plan; #ifdef CONFIG_8723AU_BT_COEXIST registry_par->btcoex = (u8)rtw_btcoex_enable; registry_par->bt_iso = (u8)rtw_bt_iso; registry_par->bt_sco = (u8)rtw_bt_sco; registry_par->bt_ampdu = (u8)rtw_bt_ampdu; #endif registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq; registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg; registry_par->antdiv_type = (u8)rtw_antdiv_type; /* 0:disable, 1:enable, 2:by EFUSE config */ registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode; /* 0:disable, 1:enable */ registry_par->hwpwrp_detect = (u8)rtw_hwpwrp_detect; registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc; registry_par->max_roaming_times = (u8)rtw_max_roaming_times; registry_par->enable80211d = (u8)rtw_80211d; snprintf(registry_par->ifname, 16, "%s", ifname); snprintf(registry_par->if2name, 16, "%s", if2name); registry_par->notch_filter = (u8)rtw_notch_filter; registry_par->regulatory_tid = (u8)rtw_regulatory_id; return _SUCCESS; } static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p) { struct rtw_adapter *padapter = netdev_priv(pnetdev); struct sockaddr *addr = p; if (!padapter->bup) ether_addr_copy(padapter->eeprompriv.mac_addr, addr->sa_data); return 0; } static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev) { struct rtw_adapter *padapter = netdev_priv(pnetdev); struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct recv_priv *precvpriv = &padapter->recvpriv; padapter->stats.tx_packets = pxmitpriv->tx_pkts; padapter->stats.rx_packets = precvpriv->rx_pkts; padapter->stats.tx_dropped = pxmitpriv->tx_drop; padapter->stats.rx_dropped = precvpriv->rx_drop; padapter->stats.tx_bytes 
= pxmitpriv->tx_bytes; padapter->stats.rx_bytes = precvpriv->rx_bytes; return &padapter->stats; } /* * AC to queue mapping * * AC_VO -> queue 0 * AC_VI -> queue 1 * AC_BE -> queue 2 * AC_BK -> queue 3 */ static const u16 rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 }; /* Given a data frame determine the 802.1p/1d tag to use. */ static u32 rtw_classify8021d(struct sk_buff *skb) { u32 dscp; /* skb->priority values from 256->263 are magic values to * directly indicate a specific 802.1d priority. This is used * to allow 802.1d priority to be passed directly in from VLAN * tags, etc. */ if (skb->priority >= 256 && skb->priority <= 263) return skb->priority - 256; switch (skb->protocol) { case htons(ETH_P_IP): dscp = ip_hdr(skb)->tos & 0xfc; break; default: return 0; } return dscp >> 5; } static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { struct rtw_adapter *padapter = netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; skb->priority = rtw_classify8021d(skb); if (pmlmepriv->acm_mask != 0) skb->priority = qos_acm23a(pmlmepriv->acm_mask, skb->priority); return rtw_1d_to_queue[skb->priority]; } u16 rtw_recv_select_queue23a(struct sk_buff *skb) { struct iphdr *piphdr; struct ethhdr *eth = (struct ethhdr *)skb->data; unsigned int dscp; u16 eth_type = get_unaligned_be16(&eth->h_proto); u32 priority; u8 *pdata = skb->data; switch (eth_type) { case ETH_P_IP: piphdr = (struct iphdr *)(pdata + ETH_HLEN); dscp = piphdr->tos & 0xfc; priority = dscp >> 5; break; default: priority = 0; } return rtw_1d_to_queue[priority]; } static const struct net_device_ops rtw_netdev_ops = { .ndo_open = netdev_open23a, .ndo_stop = netdev_close, .ndo_start_xmit = rtw_xmit23a_entry23a, .ndo_select_queue = rtw_select_queue, .ndo_set_mac_address = rtw_net_set_mac_address, .ndo_get_stats = rtw_net_get_stats, }; int rtw_init_netdev23a_name23a(struct net_device *pnetdev, const char *ifname) { if 
(dev_alloc_name(pnetdev, ifname) < 0) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("dev_alloc_name, fail!\n")); } netif_carrier_off(pnetdev); return 0; } static const struct device_type wlan_type = { .name = "wlan", }; struct net_device *rtw_init_netdev23a(struct rtw_adapter *old_padapter) { struct rtw_adapter *padapter; struct net_device *pnetdev; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n")); pnetdev = alloc_etherdev_mq(sizeof(struct rtw_adapter), 4); if (!pnetdev) return NULL; pnetdev->dev.type = &wlan_type; padapter = netdev_priv(pnetdev); padapter->pnetdev = pnetdev; DBG_8723A("register rtw_netdev_ops to netdev_ops\n"); pnetdev->netdev_ops = &rtw_netdev_ops; pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */ /* step 2. */ loadparam(padapter, pnetdev); return pnetdev; } static int rtw_init_default_value(struct rtw_adapter *padapter) { struct registry_priv *pregistrypriv = &padapter->registrypriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv *psecuritypriv = &padapter->securitypriv; /* xmit_priv */ pxmitpriv->vcs = pregistrypriv->vcs_type; /* pxmitpriv->rts_thresh = pregistrypriv->rts_thresh; */ pxmitpriv->frag_len = pregistrypriv->frag_thresh; /* mlme_priv */ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */ pmlmepriv->scan_mode = SCAN_ACTIVE; /* ht_priv */ pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */ /* security_priv */ psecuritypriv->binstallGrpkey = 0; /* open system */ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; psecuritypriv->dot11PrivacyAlgrthm = 0; psecuritypriv->dot11PrivacyKeyIndex = 0; psecuritypriv->dot118021XGrpPrivacy = 0; psecuritypriv->dot118021XGrpKeyid = 1; psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen; psecuritypriv->ndisencryptstatus = Ndis802_11WEPDisabled; /* registry_priv */ rtw_init_registrypriv_dev_network23a(padapter); rtw_update_registrypriv_dev_network23a(padapter); /* hal_priv */ 
rtl8723a_init_default_value(padapter); /* misc. */ padapter->bReadPortCancel = false; padapter->bWritePortCancel = false; return _SUCCESS; } int rtw_reset_drv_sw23a(struct rtw_adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; /* hal_priv */ rtl8723a_init_default_value(padapter); padapter->bReadPortCancel = false; padapter->bWritePortCancel = false; pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */ padapter->xmitpriv.tx_pkts = 0; padapter->recvpriv.rx_pkts = 0; pmlmepriv->LinkDetectInfo.bBusyTraffic = false; _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING); rtw_sreset_reset_value(padapter); pwrctrlpriv->pwr_state_check_cnts = 0; /* mlmeextpriv */ padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE; rtw_set_signal_stat_timer(&padapter->recvpriv); return _SUCCESS; } int rtw_init_drv_sw23a(struct rtw_adapter *padapter) { int ret8 = _SUCCESS; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw23a\n")); if (rtw_init_cmd_priv23a(&padapter->cmdpriv) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n")); ret8 = _FAIL; goto exit; } padapter->cmdpriv.padapter = padapter; if (rtw_init_evt_priv23a(&padapter->evtpriv) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n")); ret8 = _FAIL; goto exit; } if (rtw_init_mlme_priv23a(padapter) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_priv\n")); ret8 = _FAIL; goto exit; } if (init_mlme_ext_priv23a(padapter) == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_ext_priv\n")); ret8 = _FAIL; goto exit; } if (_rtw_init_xmit_priv23a(&padapter->xmitpriv, padapter) == _FAIL) { DBG_8723A("Can't _rtw_init_xmit_priv23a\n"); ret8 = _FAIL; goto exit; } if (_rtw_init_recv_priv23a(&padapter->recvpriv, padapter) == _FAIL) { DBG_8723A("Can't _rtw_init_recv_priv23a\n"); ret8 = _FAIL; goto exit; } if 
(_rtw_init_sta_priv23a(&padapter->stapriv) == _FAIL) { DBG_8723A("Can't _rtw_init_sta_priv23a\n"); ret8 = _FAIL; goto exit; } padapter->stapriv.padapter = padapter; padapter->setband = GHZ24_50; rtw_init_bcmc_stainfo23a(padapter); rtw_init_pwrctrl_priv23a(padapter); ret8 = rtw_init_default_value(padapter); rtl8723a_init_dm_priv(padapter); rtw_sreset_init(padapter); exit: RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw23a\n")); return ret8; } void rtw_cancel_all_timer23a(struct rtw_adapter *padapter) { RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_cancel_all_timer23a\n")); del_timer_sync(&padapter->mlmepriv.assoc_timer); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel association timer complete!\n", __func__)); del_timer_sync(&padapter->mlmepriv.scan_to_timer); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel scan_to_timer!\n", __func__)); del_timer_sync(&padapter->mlmepriv.dynamic_chk_timer); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel dynamic_chk_timer!\n", __func__)); del_timer_sync(&padapter->pwrctrlpriv.pwr_state_check_timer); del_timer_sync(&padapter->mlmepriv.set_scan_deny_timer); rtw_clear_scan_deny(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("%s:cancel set_scan_deny_timer!\n", __func__)); del_timer_sync(&padapter->recvpriv.signal_stat_timer); } int rtw_free_drv_sw23a(struct rtw_adapter *padapter) { RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw23a")); free_mlme_ext_priv23a(&padapter->mlmeextpriv); rtw_free_evt_priv23a(&padapter->evtpriv); rtw_free_mlme_priv23a(&padapter->mlmepriv); _rtw_free_xmit_priv23a(&padapter->xmitpriv); /* will free bcmc_stainfo here */ _rtw_free_sta_priv23a(&padapter->stapriv); _rtw_free_recv_priv23a(&padapter->recvpriv); rtw_free_pwrctrl_priv(padapter); kfree(padapter->HalData); padapter->HalData = NULL; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_free_drv_sw23a\n")); return _SUCCESS; } static int _rtw_drv_register_netdev(struct rtw_adapter *padapter, char *name) { 
struct net_device *pnetdev = padapter->pnetdev; int ret = _SUCCESS; /* alloc netdev name */ rtw_init_netdev23a_name23a(pnetdev, name); ether_addr_copy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr); /* Tell the network stack we exist */ if (register_netdev(pnetdev)) { DBG_8723A("%s(%s): Failed!\n", __func__, pnetdev->name); ret = _FAIL; goto error_register_netdev; } DBG_8723A("%s, MAC Address (if%d) = " MAC_FMT "\n", __func__, (padapter->iface_id + 1), MAC_ARG(pnetdev->dev_addr)); return ret; error_register_netdev: if (padapter->iface_id > IFACE_ID0) { rtw_free_drv_sw23a(padapter); free_netdev(pnetdev); } return ret; } int rtw_drv_register_netdev(struct rtw_adapter *if1) { struct dvobj_priv *dvobj = if1->dvobj; int i, status = _SUCCESS; if (dvobj->iface_nums >= IFACE_ID_MAX) { status = _FAIL; /* -EINVAL */ goto exit; } for (i = 0; i < dvobj->iface_nums; i++) { struct rtw_adapter *padapter = dvobj->padapters[i]; if (padapter) { char *name; if (padapter->iface_id == IFACE_ID0) name = if1->registrypriv.ifname; else if (padapter->iface_id == IFACE_ID1) name = if1->registrypriv.if2name; else name = "wlan%d"; status = _rtw_drv_register_netdev(padapter, name); if (status != _SUCCESS) break; } } exit: return status; } int netdev_open23a(struct net_device *pnetdev) { struct rtw_adapter *padapter = netdev_priv(pnetdev); struct pwrctrl_priv *pwrctrlpriv; int ret = 0; int status; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - dev_open\n")); DBG_8723A("+871x_drv - drv_open, bup =%d\n", padapter->bup); mutex_lock(&adapter_to_dvobj(padapter)->hw_init_mutex); pwrctrlpriv = &padapter->pwrctrlpriv; if (!padapter->bup) { padapter->bDriverStopped = false; padapter->bSurpriseRemoved = false; padapter->bCardDisableWOHSM = false; status = rtl8723au_hal_init(padapter); if (status == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("rtl871x_hal_init(): Can't init h/w!\n")); goto netdev_open23a_error; } DBG_8723A("MAC Address = "MAC_FMT"\n", MAC_ARG(pnetdev->dev_addr)); if 
(init_hw_mlme_ext23a(padapter) == _FAIL) { DBG_8723A("can't init mlme_ext_priv\n"); goto netdev_open23a_error; } rtl8723au_inirp_init(padapter); rtw_cfg80211_init_wiphy(padapter); padapter->bup = true; } padapter->net_closed = false; mod_timer(&padapter->mlmepriv.dynamic_chk_timer, jiffies + msecs_to_jiffies(2000)); padapter->pwrctrlpriv.bips_processing = false; rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv); /* netif_carrier_on(pnetdev);call this func when rtw23a_joinbss_event_cb return success */ if (!rtw_netif_queue_stopped(pnetdev)) netif_tx_start_all_queues(pnetdev); else netif_tx_wake_all_queues(pnetdev); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - dev_open\n")); DBG_8723A("-871x_drv - drv_open, bup =%d\n", padapter->bup); exit: mutex_unlock(&adapter_to_dvobj(padapter)->hw_init_mutex); return ret; netdev_open23a_error: padapter->bup = false; netif_carrier_off(pnetdev); netif_tx_stop_all_queues(pnetdev); RT_TRACE(_module_os_intfs_c_, _drv_err_, ("-871x_drv - dev_open, fail!\n")); DBG_8723A("-871x_drv - drv_open fail, bup =%d\n", padapter->bup); ret = -1; goto exit; } static int ips_netdrv_open(struct rtw_adapter *padapter) { int status = _SUCCESS; padapter->net_closed = false; DBG_8723A("===> %s.........\n", __func__); padapter->bDriverStopped = false; padapter->bSurpriseRemoved = false; padapter->bCardDisableWOHSM = false; status = rtl8723au_hal_init(padapter); if (status == _FAIL) { RT_TRACE(_module_os_intfs_c_, _drv_err_, ("ips_netdrv_open(): Can't init h/w!\n")); goto netdev_open23a_error; } rtl8723au_inirp_init(padapter); rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv); mod_timer(&padapter->mlmepriv.dynamic_chk_timer, jiffies + msecs_to_jiffies(5000)); return _SUCCESS; netdev_open23a_error: /* padapter->bup = false; */ DBG_8723A("-ips_netdrv_open - drv_open failure, bup =%d\n", padapter->bup); return _FAIL; } int rtw_ips_pwr_up23a(struct rtw_adapter *padapter) { int result; unsigned long start_time = jiffies; DBG_8723A("===> 
rtw_ips_pwr_up23a..............\n"); rtw_reset_drv_sw23a(padapter); result = ips_netdrv_open(padapter); DBG_8723A("<=== rtw_ips_pwr_up23a.............. in %dms\n", jiffies_to_msecs(jiffies - start_time)); return result; } void rtw_ips_pwr_down23a(struct rtw_adapter *padapter) { unsigned long start_time = jiffies; DBG_8723A("===> rtw_ips_pwr_down23a...................\n"); padapter->bCardDisableWOHSM = true; padapter->net_closed = true; rtw_ips_dev_unload23a(padapter); padapter->bCardDisableWOHSM = false; DBG_8723A("<=== rtw_ips_pwr_down23a..................... in %dms\n", jiffies_to_msecs(jiffies - start_time)); } void rtw_ips_dev_unload23a(struct rtw_adapter *padapter) { rtl8723a_fifo_cleanup(padapter); rtl8723a_usb_intf_stop(padapter); /* s5. */ if (!padapter->bSurpriseRemoved) rtl8723au_hal_deinit(padapter); } int pm_netdev_open23a(struct net_device *pnetdev, u8 bnormal) { int status; if (bnormal) status = netdev_open23a(pnetdev); else status = (_SUCCESS == ips_netdrv_open(netdev_priv(pnetdev))) ? (0) : (-1); return status; } static int netdev_close(struct net_device *pnetdev) { struct rtw_adapter *padapter = netdev_priv(pnetdev); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - drv_close\n")); padapter->net_closed = true; if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) { DBG_8723A("(2)871x_drv - drv_close, bup =%d, " "hw_init_completed =%d\n", padapter->bup, padapter->hw_init_completed); /* s1. */ if (pnetdev) { if (!rtw_netif_queue_stopped(pnetdev)) netif_tx_stop_all_queues(pnetdev); } /* s2. */ LeaveAllPowerSaveMode23a(padapter); rtw_disassoc_cmd23a(padapter, 500, false); /* s2-2. indicate disconnect to os */ rtw_indicate_disconnect23a(padapter); /* s2-3. */ rtw_free_assoc_resources23a(padapter, 1); /* s2-4. 
*/ rtw_free_network_queue23a(padapter); } rtw_scan_abort23a(padapter); RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup); return 0; } void rtw_ndev_destructor(struct net_device *ndev) { DBG_8723A("%s(%s)\n", __func__, ndev->name); kfree(ndev->ieee80211_ptr); free_netdev(ndev); } void _rtw_init_queue23a(struct rtw_queue *pqueue) { INIT_LIST_HEAD(&pqueue->queue); spin_lock_init(&pqueue->lock); }
gpl-2.0
amir73il/ext4-snapshots
drivers/gpu/drm/i915/intel_opregion.c
385
14449
/* * Copyright 2008 Intel Corporation <hong.liu@intel.com> * Copyright 2008 Red Hat <mjg@redhat.com> * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/acpi.h> #include <linux/acpi_io.h> #include <acpi/video.h> #include "drmP.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_drv.h" #define PCI_ASLE 0xe4 #define PCI_ASLS 0xfc #define OPREGION_HEADER_OFFSET 0 #define OPREGION_ACPI_OFFSET 0x100 #define ACPI_CLID 0x01ac /* current lid state indicator */ #define ACPI_CDCK 0x01b0 /* current docking state indicator */ #define OPREGION_SWSCI_OFFSET 0x200 #define OPREGION_ASLE_OFFSET 0x300 #define OPREGION_VBT_OFFSET 0x400 #define OPREGION_SIGNATURE "IntelGraphicsMem" #define MBOX_ACPI (1<<0) #define MBOX_SWSCI (1<<1) #define MBOX_ASLE (1<<2) struct opregion_header { u8 signature[16]; u32 size; u32 opregion_ver; u8 bios_ver[32]; u8 vbios_ver[16]; u8 driver_ver[16]; u32 mboxes; u8 reserved[164]; } __attribute__((packed)); /* OpRegion mailbox #1: public ACPI methods */ struct opregion_acpi { u32 drdy; /* driver readiness */ u32 csts; /* notification status */ u32 cevt; /* current event */ u8 rsvd1[20]; u32 didl[8]; /* supported display devices ID list */ u32 cpdl[8]; /* currently presented display list */ u32 cadl[8]; /* currently active display list */ u32 nadl[8]; /* next active devices list */ u32 aslp; /* ASL sleep time-out */ u32 tidx; /* toggle table index */ u32 chpd; /* current hotplug enable indicator */ u32 clid; /* current lid state*/ u32 cdck; /* current docking state */ u32 sxsw; /* Sx state resume */ u32 evts; /* ASL supported events */ u32 cnot; /* current OS notification */ u32 nrdy; /* driver status */ u8 rsvd2[60]; } __attribute__((packed)); /* OpRegion mailbox #2: SWSCI */ struct opregion_swsci { u32 scic; /* SWSCI command|status|data */ u32 parm; /* command parameters */ u32 dslp; /* driver sleep time-out */ u8 rsvd[244]; } __attribute__((packed)); /* OpRegion mailbox #3: ASLE */ struct opregion_asle { u32 ardy; /* driver readiness */ u32 aslc; /* ASLE interrupt command */ u32 tche; /* technology enabled indicator */ u32 alsi; /* current ALS illuminance reading */ u32 
bclp; /* backlight brightness to set */ u32 pfit; /* panel fitting state */ u32 cblv; /* current brightness level */ u16 bclm[20]; /* backlight level duty cycle mapping table */ u32 cpfm; /* current panel fitting mode */ u32 epfm; /* enabled panel fitting modes */ u8 plut[74]; /* panel LUT and identifier */ u32 pfmb; /* PWM freq and min brightness */ u8 rsvd[102]; } __attribute__((packed)); /* ASLE irq request bits */ #define ASLE_SET_ALS_ILLUM (1 << 0) #define ASLE_SET_BACKLIGHT (1 << 1) #define ASLE_SET_PFIT (1 << 2) #define ASLE_SET_PWM_FREQ (1 << 3) #define ASLE_REQ_MSK 0xf /* response bits of ASLE irq request */ #define ASLE_ALS_ILLUM_FAILED (1<<10) #define ASLE_BACKLIGHT_FAILED (1<<12) #define ASLE_PFIT_FAILED (1<<14) #define ASLE_PWM_FREQ_FAILED (1<<16) /* ASLE backlight brightness to set */ #define ASLE_BCLP_VALID (1<<31) #define ASLE_BCLP_MSK (~(1<<31)) /* ASLE panel fitting request */ #define ASLE_PFIT_VALID (1<<31) #define ASLE_PFIT_CENTER (1<<0) #define ASLE_PFIT_STRETCH_TEXT (1<<1) #define ASLE_PFIT_STRETCH_GFX (1<<2) /* PWM frequency and minimum brightness */ #define ASLE_PFMB_BRIGHTNESS_MASK (0xff) #define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) #define ASLE_PFMB_PWM_MASK (0x7ffffe00) #define ASLE_PFMB_PWM_VALID (1<<31) #define ASLE_CBLV_VALID (1<<31) #define ACPI_OTHER_OUTPUT (0<<8) #define ACPI_VGA_OUTPUT (1<<8) #define ACPI_TV_OUTPUT (2<<8) #define ACPI_DIGITAL_OUTPUT (3<<8) #define ACPI_LVDS_OUTPUT (4<<8) #ifdef CONFIG_ACPI static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; u32 max; if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAILED; bclp &= ASLE_BCLP_MSK; if (bclp > 255) return ASLE_BACKLIGHT_FAILED; max = intel_panel_get_max_backlight(dev); intel_panel_set_backlight(dev, bclp * max / 255); asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; return 0; } static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) { /* 
alsi is the current ALS reading in lux. 0 indicates below sensor range, 0xffff indicates above sensor range. 1-0xfffe are valid */ return 0; } static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) { struct drm_i915_private *dev_priv = dev->dev_private; if (pfmb & ASLE_PFMB_PWM_VALID) { u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); u32 pwm = pfmb & ASLE_PFMB_PWM_MASK; blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK; pwm = pwm >> 9; /* FIXME - what do we do with the PWM? */ } return 0; } static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) { /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ if (!(pfit & ASLE_PFIT_VALID)) return ASLE_PFIT_FAILED; return 0; } void intel_opregion_asle_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; u32 asle_stat = 0; u32 asle_req; if (!asle) return; asle_req = asle->aslc & ASLE_REQ_MSK; if (!asle_req) { DRM_DEBUG_DRIVER("non asle set request??\n"); return; } if (asle_req & ASLE_SET_ALS_ILLUM) asle_stat |= asle_set_als_illum(dev, asle->alsi); if (asle_req & ASLE_SET_BACKLIGHT) asle_stat |= asle_set_backlight(dev, asle->bclp); if (asle_req & ASLE_SET_PFIT) asle_stat |= asle_set_pfit(dev, asle->pfit); if (asle_req & ASLE_SET_PWM_FREQ) asle_stat |= asle_set_pwm_freq(dev, asle->pfmb); asle->aslc = asle_stat; } void intel_opregion_gse_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; u32 asle_stat = 0; u32 asle_req; if (!asle) return; asle_req = asle->aslc & ASLE_REQ_MSK; if (!asle_req) { DRM_DEBUG_DRIVER("non asle set request??\n"); return; } if (asle_req & ASLE_SET_ALS_ILLUM) { DRM_DEBUG_DRIVER("Illum is not supported\n"); asle_stat |= ASLE_ALS_ILLUM_FAILED; } if (asle_req & ASLE_SET_BACKLIGHT) asle_stat |= asle_set_backlight(dev, asle->bclp); if (asle_req & ASLE_SET_PFIT) { 
DRM_DEBUG_DRIVER("Pfit is not supported\n"); asle_stat |= ASLE_PFIT_FAILED; } if (asle_req & ASLE_SET_PWM_FREQ) { DRM_DEBUG_DRIVER("PWM freq is not supported\n"); asle_stat |= ASLE_PWM_FREQ_FAILED; } asle->aslc = asle_stat; } #define ASLE_ALS_EN (1<<0) #define ASLE_BLC_EN (1<<1) #define ASLE_PFIT_EN (1<<2) #define ASLE_PFMB_EN (1<<3) void intel_opregion_enable_asle(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; if (asle) { if (IS_MOBILE(dev)) intel_enable_asle(dev); asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN; asle->ardy = 1; } } #define ACPI_EV_DISPLAY_SWITCH (1<<0) #define ACPI_EV_LID (1<<1) #define ACPI_EV_DOCK (1<<2) static struct intel_opregion *system_opregion; static int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, void *data) { /* The only video events relevant to opregion are 0x80. These indicate either a docking event, lid switch or display switch request. In Linux, these are handled by the dock, button and video drivers. */ struct opregion_acpi *acpi; struct acpi_bus_event *event = data; int ret = NOTIFY_OK; if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) return NOTIFY_DONE; if (!system_opregion) return NOTIFY_DONE; acpi = system_opregion->acpi; if (event->type == 0x80 && !(acpi->cevt & 0x1)) ret = NOTIFY_BAD; acpi->csts = 0; return ret; } static struct notifier_block intel_opregion_notifier = { .notifier_call = intel_opregion_video_event, }; /* * Initialise the DIDL field in opregion. This passes a list of devices to * the firmware. 
Values are defined by section B.4.2 of the ACPI specification * (version 3) */ static void intel_didl_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; struct drm_connector *connector; acpi_handle handle; struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; unsigned long long device_id; acpi_status status; int i = 0; handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) return; if (acpi_is_video_device(acpi_dev)) acpi_video_bus = acpi_dev; else { list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { if (acpi_is_video_device(acpi_cdev)) { acpi_video_bus = acpi_cdev; break; } } } if (!acpi_video_bus) { printk(KERN_WARNING "No ACPI video bus found\n"); return; } list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { if (i >= 8) { dev_printk (KERN_ERR, &dev->pdev->dev, "More than 8 outputs detected\n"); return; } status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR", NULL, &device_id); if (ACPI_SUCCESS(status)) { if (!device_id) goto blind_set; opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f); i++; } } end: /* If fewer than 8 outputs, the list must be null terminated */ if (i < 8) opregion->acpi->didl[i] = 0; return; blind_set: i = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { int output_type = ACPI_OTHER_OUTPUT; if (i >= 8) { dev_printk (KERN_ERR, &dev->pdev->dev, "More than 8 outputs detected\n"); return; } switch (connector->connector_type) { case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_DVIA: output_type = ACPI_VGA_OUTPUT; break; case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Component: case DRM_MODE_CONNECTOR_9PinDIN: output_type = ACPI_TV_OUTPUT; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_HDMIA: case 
DRM_MODE_CONNECTOR_HDMIB: output_type = ACPI_DIGITAL_OUTPUT; break; case DRM_MODE_CONNECTOR_LVDS: output_type = ACPI_LVDS_OUTPUT; break; } opregion->acpi->didl[i] |= (1<<31) | output_type | i; i++; } goto end; } void intel_opregion_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) return; if (opregion->acpi) { if (drm_core_check_feature(dev, DRIVER_MODESET)) intel_didl_outputs(dev); /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. * We don't actually need to do anything with them. */ opregion->acpi->csts = 0; opregion->acpi->drdy = 1; system_opregion = opregion; register_acpi_notifier(&intel_opregion_notifier); } if (opregion->asle) intel_opregion_enable_asle(dev); } void intel_opregion_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) return; if (opregion->acpi) { opregion->acpi->drdy = 0; system_opregion = NULL; unregister_acpi_notifier(&intel_opregion_notifier); } /* just clear all opregion memory pointers now */ iounmap(opregion->header); opregion->header = NULL; opregion->acpi = NULL; opregion->swsci = NULL; opregion->asle = NULL; opregion->vbt = NULL; } #endif int intel_opregion_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; void *base; u32 asls, mboxes; int err = 0; pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); if (asls == 0) { DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); return -ENOTSUPP; } base = acpi_os_ioremap(asls, OPREGION_SIZE); if (!base) return -ENOMEM; if (memcmp(base, OPREGION_SIGNATURE, 16)) { DRM_DEBUG_DRIVER("opregion signature mismatch\n"); err = -EINVAL; goto err_out; } 
opregion->header = base; opregion->vbt = base + OPREGION_VBT_OFFSET; opregion->lid_state = base + ACPI_CLID; mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); opregion->acpi = base + OPREGION_ACPI_OFFSET; } if (mboxes & MBOX_SWSCI) { DRM_DEBUG_DRIVER("SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; } if (mboxes & MBOX_ASLE) { DRM_DEBUG_DRIVER("ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; } return 0; err_out: iounmap(base); return err; }
gpl-2.0
TripNRaVeR/tripndroid-endeavoru-3.1.10-htc
arch/arm/mach-pxa/viper.c
385
22512
/* * linux/arch/arm/mach-pxa/viper.c * * Support for the Arcom VIPER SBC. * * Author: Ian Campbell * Created: Feb 03, 2003 * Copyright: Arcom Control Systems * * Maintained by Marc Zyngier <maz@misterjones.org> * <marc.zyngier@altran.com> * * Based on lubbock.c: * Author: Nicolas Pitre * Created: Jun 15, 2001 * Copyright: MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/memory.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/module.h> #include <linux/pm.h> #include <linux/sched.h> #include <linux/gpio.h> #include <linux/jiffies.h> #include <linux/i2c-gpio.h> #include <linux/i2c/pxa-i2c.h> #include <linux/serial_8250.h> #include <linux/smc91x.h> #include <linux/pwm_backlight.h> #include <linux/usb/isp116x.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/syscore_ops.h> #include <mach/pxa25x.h> #include <mach/audio.h> #include <mach/pxafb.h> #include <mach/regs-uart.h> #include <mach/arcom-pcmcia.h> #include <mach/viper.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/sizes.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include "generic.h" #include "devices.h" static unsigned int icr; static void viper_icr_set_bit(unsigned int bit) { icr |= bit; VIPER_ICR = icr; } static void viper_icr_clear_bit(unsigned int bit) { icr &= ~bit; VIPER_ICR = icr; } /* This function is used from the pcmcia module to reset the CF */ static void viper_cf_reset(int state) { if (state) viper_icr_set_bit(VIPER_ICR_CF_RST); else viper_icr_clear_bit(VIPER_ICR_CF_RST); } static struct 
arcom_pcmcia_pdata viper_pcmcia_info = { .cd_gpio = VIPER_CF_CD_GPIO, .rdy_gpio = VIPER_CF_RDY_GPIO, .pwr_gpio = VIPER_CF_POWER_GPIO, .reset = viper_cf_reset, }; static struct platform_device viper_pcmcia_device = { .name = "viper-pcmcia", .id = -1, .dev = { .platform_data = &viper_pcmcia_info, }, }; /* * The CPLD version register was not present on VIPER boards prior to * v2i1. On v1 boards where the version register is not present we * will just read back the previous value from the databus. * * Therefore we do two reads. The first time we write 0 to the * (read-only) register before reading and the second time we write * 0xff first. If the two reads do not match or they read back as 0xff * or 0x00 then we have version 1 hardware. */ static u8 viper_hw_version(void) { u8 v1, v2; unsigned long flags; local_irq_save(flags); VIPER_VERSION = 0; v1 = VIPER_VERSION; VIPER_VERSION = 0xff; v2 = VIPER_VERSION; v1 = (v1 != v2 || v1 == 0xff) ? 0 : v1; local_irq_restore(flags); return v1; } /* CPU system core operations. */ static int viper_cpu_suspend(void) { viper_icr_set_bit(VIPER_ICR_R_DIS); return 0; } static void viper_cpu_resume(void) { viper_icr_clear_bit(VIPER_ICR_R_DIS); } static struct syscore_ops viper_cpu_syscore_ops = { .suspend = viper_cpu_suspend, .resume = viper_cpu_resume, }; static unsigned int current_voltage_divisor; /* * If force is not true then step from existing to new divisor. If * force is true then jump straight to the new divisor. Stepping is * used because if the jump in voltage is too large, the VCC can dip * too low and the regulator cuts out. 
* * force can be used to initialize the divisor to a know state by * setting the value for the current clock speed, since we are already * running at that speed we know the voltage should be pretty close so * the jump won't be too large */ static void viper_set_core_cpu_voltage(unsigned long khz, int force) { int i = 0; unsigned int divisor = 0; const char *v; if (khz < 200000) { v = "1.0"; divisor = 0xfff; } else if (khz < 300000) { v = "1.1"; divisor = 0xde5; } else { v = "1.3"; divisor = 0x325; } pr_debug("viper: setting CPU core voltage to %sV at %d.%03dMHz\n", v, (int)khz / 1000, (int)khz % 1000); #define STEP 0x100 do { int step; if (force) step = divisor; else if (current_voltage_divisor < divisor - STEP) step = current_voltage_divisor + STEP; else if (current_voltage_divisor > divisor + STEP) step = current_voltage_divisor - STEP; else step = divisor; force = 0; gpio_set_value(VIPER_PSU_CLK_GPIO, 0); gpio_set_value(VIPER_PSU_nCS_LD_GPIO, 0); for (i = 1 << 11 ; i > 0 ; i >>= 1) { udelay(1); gpio_set_value(VIPER_PSU_DATA_GPIO, step & i); udelay(1); gpio_set_value(VIPER_PSU_CLK_GPIO, 1); udelay(1); gpio_set_value(VIPER_PSU_CLK_GPIO, 0); } udelay(1); gpio_set_value(VIPER_PSU_nCS_LD_GPIO, 1); udelay(1); gpio_set_value(VIPER_PSU_nCS_LD_GPIO, 0); current_voltage_divisor = step; } while (current_voltage_divisor != divisor); } /* Interrupt handling */ static unsigned long viper_irq_enabled_mask; static const int viper_isa_irqs[] = { 3, 4, 5, 6, 7, 10, 11, 12, 9, 14, 15 }; static const int viper_isa_irq_map[] = { 0, /* ISA irq #0, invalid */ 0, /* ISA irq #1, invalid */ 0, /* ISA irq #2, invalid */ 1 << 0, /* ISA irq #3 */ 1 << 1, /* ISA irq #4 */ 1 << 2, /* ISA irq #5 */ 1 << 3, /* ISA irq #6 */ 1 << 4, /* ISA irq #7 */ 0, /* ISA irq #8, invalid */ 1 << 8, /* ISA irq #9 */ 1 << 5, /* ISA irq #10 */ 1 << 6, /* ISA irq #11 */ 1 << 7, /* ISA irq #12 */ 0, /* ISA irq #13, invalid */ 1 << 9, /* ISA irq #14 */ 1 << 10, /* ISA irq #15 */ }; static inline int 
viper_irq_to_bitmask(unsigned int irq) { return viper_isa_irq_map[irq - PXA_ISA_IRQ(0)]; } static inline int viper_bit_to_irq(int bit) { return viper_isa_irqs[bit] + PXA_ISA_IRQ(0); } static void viper_ack_irq(struct irq_data *d) { int viper_irq = viper_irq_to_bitmask(d->irq); if (viper_irq & 0xff) VIPER_LO_IRQ_STATUS = viper_irq; else VIPER_HI_IRQ_STATUS = (viper_irq >> 8); } static void viper_mask_irq(struct irq_data *d) { viper_irq_enabled_mask &= ~(viper_irq_to_bitmask(d->irq)); } static void viper_unmask_irq(struct irq_data *d) { viper_irq_enabled_mask |= viper_irq_to_bitmask(d->irq); } static inline unsigned long viper_irq_pending(void) { return (VIPER_HI_IRQ_STATUS << 8 | VIPER_LO_IRQ_STATUS) & viper_irq_enabled_mask; } static void viper_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned long pending; pending = viper_irq_pending(); do { /* we're in a chained irq handler, * so ack the interrupt by hand */ desc->irq_data.chip->irq_ack(&desc->irq_data); if (likely(pending)) { irq = viper_bit_to_irq(__ffs(pending)); generic_handle_irq(irq); } pending = viper_irq_pending(); } while (pending); } static struct irq_chip viper_irq_chip = { .name = "ISA", .irq_ack = viper_ack_irq, .irq_mask = viper_mask_irq, .irq_unmask = viper_unmask_irq }; static void __init viper_init_irq(void) { int level; int isa_irq; pxa25x_init_irq(); /* setup ISA IRQs */ for (level = 0; level < ARRAY_SIZE(viper_isa_irqs); level++) { isa_irq = viper_bit_to_irq(level); irq_set_chip_and_handler(isa_irq, &viper_irq_chip, handle_edge_irq); set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE); } irq_set_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO), viper_irq_handler); irq_set_irq_type(gpio_to_irq(VIPER_CPLD_GPIO), IRQ_TYPE_EDGE_BOTH); } /* Flat Panel */ static struct pxafb_mode_info fb_mode_info[] = { { .pixclock = 157500, .xres = 320, .yres = 240, .bpp = 16, .hsync_len = 63, .left_margin = 7, .right_margin = 13, .vsync_len = 20, .upper_margin = 0, .lower_margin = 0, .sync = 0, }, }; static 
struct pxafb_mach_info fb_info = { .modes = fb_mode_info, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, }; static int viper_backlight_init(struct device *dev) { int ret; /* GPIO9 and 10 control FB backlight. Initialise to off */ ret = gpio_request(VIPER_BCKLIGHT_EN_GPIO, "Backlight"); if (ret) goto err_request_bckl; ret = gpio_request(VIPER_LCD_EN_GPIO, "LCD"); if (ret) goto err_request_lcd; ret = gpio_direction_output(VIPER_BCKLIGHT_EN_GPIO, 0); if (ret) goto err_dir; ret = gpio_direction_output(VIPER_LCD_EN_GPIO, 0); if (ret) goto err_dir; return 0; err_dir: gpio_free(VIPER_LCD_EN_GPIO); err_request_lcd: gpio_free(VIPER_BCKLIGHT_EN_GPIO); err_request_bckl: dev_err(dev, "Failed to setup LCD GPIOs\n"); return ret; } static int viper_backlight_notify(struct device *dev, int brightness) { gpio_set_value(VIPER_LCD_EN_GPIO, !!brightness); gpio_set_value(VIPER_BCKLIGHT_EN_GPIO, !!brightness); return brightness; } static void viper_backlight_exit(struct device *dev) { gpio_free(VIPER_LCD_EN_GPIO); gpio_free(VIPER_BCKLIGHT_EN_GPIO); } static struct platform_pwm_backlight_data viper_backlight_data = { .pwm_id = 0, .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 1000000, .init = viper_backlight_init, .notify = viper_backlight_notify, .exit = viper_backlight_exit, }; static struct platform_device viper_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &pxa25x_device_pwm0.dev, .platform_data = &viper_backlight_data, }, }; /* Ethernet */ static struct resource smc91x_resources[] = { [0] = { .name = "smc91x-regs", .start = VIPER_ETH_PHYS + 0x300, .end = VIPER_ETH_PHYS + 0x30f, .flags = IORESOURCE_MEM, }, [1] = { .start = gpio_to_irq(VIPER_ETH_GPIO), .end = gpio_to_irq(VIPER_ETH_GPIO), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, [2] = { .name = "smc91x-data32", .start = VIPER_ETH_DATA_PHYS, .end = VIPER_ETH_DATA_PHYS + 3, .flags = IORESOURCE_MEM, }, }; static struct smc91x_platdata viper_smc91x_info = { .flags = 
SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = -1, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &viper_smc91x_info, }, }; /* i2c */ static struct i2c_gpio_platform_data i2c_bus_data = { .sda_pin = VIPER_RTC_I2C_SDA_GPIO, .scl_pin = VIPER_RTC_I2C_SCL_GPIO, .udelay = 10, .timeout = HZ, }; static struct platform_device i2c_bus_device = { .name = "i2c-gpio", .id = 1, /* pxa2xx-i2c is bus 0, so start at 1 */ .dev = { .platform_data = &i2c_bus_data, } }; static struct i2c_board_info __initdata viper_i2c_devices[] = { { I2C_BOARD_INFO("ds1338", 0x68), }, }; /* * Serial configuration: * You can either have the standard PXA ports driven by the PXA driver, * or all the ports (PXA + 16850) driven by the 8250 driver. * Choose your poison. */ static struct resource viper_serial_resources[] = { #ifndef CONFIG_SERIAL_PXA { .start = 0x40100000, .end = 0x4010001f, .flags = IORESOURCE_MEM, }, { .start = 0x40200000, .end = 0x4020001f, .flags = IORESOURCE_MEM, }, { .start = 0x40700000, .end = 0x4070001f, .flags = IORESOURCE_MEM, }, { .start = VIPER_UARTA_PHYS, .end = VIPER_UARTA_PHYS + 0xf, .flags = IORESOURCE_MEM, }, { .start = VIPER_UARTB_PHYS, .end = VIPER_UARTB_PHYS + 0xf, .flags = IORESOURCE_MEM, }, #else { 0, }, #endif }; static struct plat_serial8250_port serial_platform_data[] = { #ifndef CONFIG_SERIAL_PXA /* Internal UARTs */ { .membase = (void *)&FFUART, .mapbase = __PREG(FFUART), .irq = IRQ_FFUART, .uartclk = 921600 * 16, .regshift = 2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, }, { .membase = (void *)&BTUART, .mapbase = __PREG(BTUART), .irq = IRQ_BTUART, .uartclk = 921600 * 16, .regshift = 2, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, }, { .membase = (void *)&STUART, .mapbase = __PREG(STUART), .irq = IRQ_STUART, .uartclk = 921600 * 16, .regshift = 2, .flags = 
UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, .iotype = UPIO_MEM, }, /* External UARTs */ { .mapbase = VIPER_UARTA_PHYS, .irq = gpio_to_irq(VIPER_UARTA_GPIO), .irqflags = IRQF_TRIGGER_RISING, .uartclk = 1843200, .regshift = 1, .iotype = UPIO_MEM, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_SKIP_TEST, }, { .mapbase = VIPER_UARTB_PHYS, .irq = gpio_to_irq(VIPER_UARTB_GPIO), .irqflags = IRQF_TRIGGER_RISING, .uartclk = 1843200, .regshift = 1, .iotype = UPIO_MEM, .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_SKIP_TEST, }, #endif { }, }; static struct platform_device serial_device = { .name = "serial8250", .id = 0, .dev = { .platform_data = serial_platform_data, }, .num_resources = ARRAY_SIZE(viper_serial_resources), .resource = viper_serial_resources, }; /* USB */ static void isp116x_delay(struct device *dev, int delay) { ndelay(delay); } static struct resource isp116x_resources[] = { [0] = { /* DATA */ .start = VIPER_USB_PHYS + 0, .end = VIPER_USB_PHYS + 1, .flags = IORESOURCE_MEM, }, [1] = { /* ADDR */ .start = VIPER_USB_PHYS + 2, .end = VIPER_USB_PHYS + 3, .flags = IORESOURCE_MEM, }, [2] = { .start = gpio_to_irq(VIPER_USB_GPIO), .end = gpio_to_irq(VIPER_USB_GPIO), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; /* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */ static struct isp116x_platform_data isp116x_platform_data = { /* Enable internal resistors on downstream ports */ .sel15Kres = 1, /* On-chip overcurrent protection */ .oc_enable = 1, /* INT output polarity */ .int_act_high = 1, /* INT edge or level triggered */ .int_edge_triggered = 0, /* WAKEUP pin connected - NOT SUPPORTED */ /* .remote_wakeup_connected = 0, */ /* Wakeup by devices on usb bus enabled */ .remote_wakeup_enable = 0, .delay = isp116x_delay, }; static struct platform_device isp116x_device = { .name = "isp116x-hcd", .id = -1, .num_resources = ARRAY_SIZE(isp116x_resources), .resource = isp116x_resources, .dev = { .platform_data = &isp116x_platform_data, }, }; /* MTD */ 
static struct resource mtd_resources[] = { [0] = { /* RedBoot config + filesystem flash */ .start = VIPER_FLASH_PHYS, .end = VIPER_FLASH_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, }, [1] = { /* Boot flash */ .start = VIPER_BOOT_PHYS, .end = VIPER_BOOT_PHYS + SZ_1M - 1, .flags = IORESOURCE_MEM, }, [2] = { /* * SRAM size is actually 256KB, 8bits, with a sparse mapping * (each byte is on a 16bit boundary). */ .start = _VIPER_SRAM_BASE, .end = _VIPER_SRAM_BASE + SZ_512K - 1, .flags = IORESOURCE_MEM, }, }; static struct mtd_partition viper_boot_flash_partition = { .name = "RedBoot", .size = SZ_1M, .offset = 0, .mask_flags = MTD_WRITEABLE, /* force R/O */ }; static struct physmap_flash_data viper_flash_data[] = { [0] = { .width = 2, .parts = NULL, .nr_parts = 0, }, [1] = { .width = 2, .parts = &viper_boot_flash_partition, .nr_parts = 1, }, }; static struct platform_device viper_mtd_devices[] = { [0] = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &viper_flash_data[0], }, .resource = &mtd_resources[0], .num_resources = 1, }, [1] = { .name = "physmap-flash", .id = 1, .dev = { .platform_data = &viper_flash_data[1], }, .resource = &mtd_resources[1], .num_resources = 1, }, }; static struct platform_device *viper_devs[] __initdata = { &smc91x_device, &i2c_bus_device, &serial_device, &isp116x_device, &viper_mtd_devices[0], &viper_mtd_devices[1], &viper_backlight_device, &viper_pcmcia_device, }; static mfp_cfg_t viper_pin_config[] __initdata = { /* Chip selects */ GPIO15_nCS_1, GPIO78_nCS_2, GPIO79_nCS_3, GPIO80_nCS_4, GPIO33_nCS_5, /* AC97 */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, /* FP Backlight */ GPIO9_GPIO, /* VIPER_BCKLIGHT_EN_GPIO */ GPIO10_GPIO, /* VIPER_LCD_EN_GPIO */ GPIO16_PWM0_OUT, /* Ethernet PHY Ready */ GPIO18_RDY, /* Serial shutdown */ GPIO12_GPIO | MFP_LPM_DRIVE_HIGH, /* VIPER_UART_SHDN_GPIO */ /* Compact-Flash / PC104 */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO52_nPCE_1, 
GPIO53_nPCE_2, GPIO54_nPSKTSEL, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, GPIO8_GPIO, /* VIPER_CF_RDY_GPIO */ GPIO32_GPIO, /* VIPER_CF_CD_GPIO */ GPIO82_GPIO, /* VIPER_CF_POWER_GPIO */ /* Integrated UPS control */ GPIO20_GPIO, /* VIPER_UPS_GPIO */ /* Vcc regulator control */ GPIO6_GPIO, /* VIPER_PSU_DATA_GPIO */ GPIO11_GPIO, /* VIPER_PSU_CLK_GPIO */ GPIO19_GPIO, /* VIPER_PSU_nCS_LD_GPIO */ /* i2c busses */ GPIO26_GPIO, /* VIPER_TPM_I2C_SDA_GPIO */ GPIO27_GPIO, /* VIPER_TPM_I2C_SCL_GPIO */ GPIO83_GPIO, /* VIPER_RTC_I2C_SDA_GPIO */ GPIO84_GPIO, /* VIPER_RTC_I2C_SCL_GPIO */ /* PC/104 Interrupt */ GPIO1_GPIO | WAKEUP_ON_EDGE_RISE, /* VIPER_CPLD_GPIO */ }; static unsigned long viper_tpm; static int __init viper_tpm_setup(char *str) { strict_strtoul(str, 10, &viper_tpm); return 1; } __setup("tpm=", viper_tpm_setup); static void __init viper_tpm_init(void) { struct platform_device *tpm_device; struct i2c_gpio_platform_data i2c_tpm_data = { .sda_pin = VIPER_TPM_I2C_SDA_GPIO, .scl_pin = VIPER_TPM_I2C_SCL_GPIO, .udelay = 10, .timeout = HZ, }; char *errstr; /* Allocate TPM i2c bus if requested */ if (!viper_tpm) return; tpm_device = platform_device_alloc("i2c-gpio", 2); if (tpm_device) { if (!platform_device_add_data(tpm_device, &i2c_tpm_data, sizeof(i2c_tpm_data))) { if (platform_device_add(tpm_device)) { errstr = "register TPM i2c bus"; goto error_free_tpm; } } else { errstr = "allocate TPM i2c bus data"; goto error_free_tpm; } } else { errstr = "allocate TPM i2c device"; goto error_tpm; } return; error_free_tpm: kfree(tpm_device); error_tpm: pr_err("viper: Couldn't %s, giving up\n", errstr); } static void __init viper_init_vcore_gpios(void) { if (gpio_request(VIPER_PSU_DATA_GPIO, "PSU data")) goto err_request_data; if (gpio_request(VIPER_PSU_CLK_GPIO, "PSU clock")) goto err_request_clk; if (gpio_request(VIPER_PSU_nCS_LD_GPIO, "PSU cs")) goto err_request_cs; if (gpio_direction_output(VIPER_PSU_DATA_GPIO, 0) || gpio_direction_output(VIPER_PSU_CLK_GPIO, 0) || 
gpio_direction_output(VIPER_PSU_nCS_LD_GPIO, 0)) goto err_dir; /* c/should assume redboot set the correct level ??? */ viper_set_core_cpu_voltage(get_clk_frequency_khz(0), 1); return; err_dir: gpio_free(VIPER_PSU_nCS_LD_GPIO); err_request_cs: gpio_free(VIPER_PSU_CLK_GPIO); err_request_clk: gpio_free(VIPER_PSU_DATA_GPIO); err_request_data: pr_err("viper: Failed to setup vcore control GPIOs\n"); } static void __init viper_init_serial_gpio(void) { if (gpio_request(VIPER_UART_SHDN_GPIO, "UARTs shutdown")) goto err_request; if (gpio_direction_output(VIPER_UART_SHDN_GPIO, 0)) goto err_dir; return; err_dir: gpio_free(VIPER_UART_SHDN_GPIO); err_request: pr_err("viper: Failed to setup UART shutdown GPIO\n"); } #ifdef CONFIG_CPU_FREQ static int viper_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; /* TODO: Adjust timings??? */ switch (val) { case CPUFREQ_PRECHANGE: if (freq->old < freq->new) { /* we are getting faster so raise the voltage * before we change freq */ viper_set_core_cpu_voltage(freq->new, 0); } break; case CPUFREQ_POSTCHANGE: if (freq->old > freq->new) { /* we are slowing down so drop the power * after we change freq */ viper_set_core_cpu_voltage(freq->new, 0); } break; case CPUFREQ_RESUMECHANGE: viper_set_core_cpu_voltage(freq->new, 0); break; default: /* ignore */ break; } return 0; } static struct notifier_block viper_cpufreq_notifier_block = { .notifier_call = viper_cpufreq_notifier }; static void __init viper_init_cpufreq(void) { if (cpufreq_register_notifier(&viper_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER)) pr_err("viper: Failed to setup cpufreq notifier\n"); } #else static inline void viper_init_cpufreq(void) {} #endif static void viper_power_off(void) { pr_notice("Shutting off UPS\n"); gpio_set_value(VIPER_UPS_GPIO, 1); /* Spin to death... 
*/ while (1); } static void __init viper_init(void) { u8 version; pm_power_off = viper_power_off; pxa2xx_mfp_config(ARRAY_AND_SIZE(viper_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); /* Wake-up serial console */ viper_init_serial_gpio(); pxa_set_fb_info(NULL, &fb_info); /* v1 hardware cannot use the datacs line */ version = viper_hw_version(); if (version == 0) smc91x_device.num_resources--; pxa_set_i2c_info(NULL); platform_add_devices(viper_devs, ARRAY_SIZE(viper_devs)); viper_init_vcore_gpios(); viper_init_cpufreq(); register_syscore_ops(&viper_cpu_syscore_ops); if (version) { pr_info("viper: hardware v%di%d detected. " "CPLD revision %d.\n", VIPER_BOARD_VERSION(version), VIPER_BOARD_ISSUE(version), VIPER_CPLD_REVISION(version)); system_rev = (VIPER_BOARD_VERSION(version) << 8) | (VIPER_BOARD_ISSUE(version) << 4) | VIPER_CPLD_REVISION(version); } else { pr_info("viper: No version register.\n"); } i2c_register_board_info(1, ARRAY_AND_SIZE(viper_i2c_devices)); viper_tpm_init(); pxa_set_ac97_info(NULL); } static struct map_desc viper_io_desc[] __initdata = { { .virtual = VIPER_CPLD_BASE, .pfn = __phys_to_pfn(VIPER_CPLD_PHYS), .length = 0x00300000, .type = MT_DEVICE, }, { .virtual = VIPER_PC104IO_BASE, .pfn = __phys_to_pfn(0x30000000), .length = 0x00800000, .type = MT_DEVICE, }, }; static void __init viper_map_io(void) { pxa25x_map_io(); iotable_init(viper_io_desc, ARRAY_SIZE(viper_io_desc)); PCFR |= PCFR_OPDE; } MACHINE_START(VIPER, "Arcom/Eurotech VIPER SBC") /* Maintainer: Marc Zyngier <maz@misterjones.org> */ .boot_params = 0xa0000100, .map_io = viper_map_io, .init_irq = viper_init_irq, .handle_irq = pxa25x_handle_irq, .timer = &pxa_timer, .init_machine = viper_init, MACHINE_END
gpl-2.0
nickpack/htc-kernel-saga
fs/smbfs/smbiod.c
897
7662
/* * smbiod.c * * Copyright (C) 2000, Charles Loep / Corel Corp. * Copyright (C) 2001, Urban Widmark */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/file.h> #include <linux/dcache.h> #include <linux/module.h> #include <linux/net.h> #include <linux/kthread.h> #include <net/ip.h> #include <linux/smb_fs.h> #include <linux/smbno.h> #include <linux/smb_mount.h> #include <asm/system.h> #include <asm/uaccess.h> #include "smb_debug.h" #include "request.h" #include "proto.h" enum smbiod_state { SMBIOD_DEAD, SMBIOD_STARTING, SMBIOD_RUNNING, }; static enum smbiod_state smbiod_state = SMBIOD_DEAD; static struct task_struct *smbiod_thread; static DECLARE_WAIT_QUEUE_HEAD(smbiod_wait); static LIST_HEAD(smb_servers); static DEFINE_SPINLOCK(servers_lock); #define SMBIOD_DATA_READY (1<<0) static unsigned long smbiod_flags; static int smbiod(void *); static int smbiod_start(void); /* * called when there's work for us to do */ void smbiod_wake_up(void) { if (smbiod_state == SMBIOD_DEAD) return; set_bit(SMBIOD_DATA_READY, &smbiod_flags); wake_up_interruptible(&smbiod_wait); } /* * start smbiod if none is running */ static int smbiod_start(void) { struct task_struct *tsk; int err = 0; if (smbiod_state != SMBIOD_DEAD) return 0; smbiod_state = SMBIOD_STARTING; __module_get(THIS_MODULE); spin_unlock(&servers_lock); tsk = kthread_run(smbiod, NULL, "smbiod"); if (IS_ERR(tsk)) { err = PTR_ERR(tsk); module_put(THIS_MODULE); } spin_lock(&servers_lock); if (err < 0) { smbiod_state = SMBIOD_DEAD; smbiod_thread = NULL; } else { smbiod_state = SMBIOD_RUNNING; smbiod_thread = tsk; } return err; } /* * register a server & start smbiod if necessary */ int smbiod_register_server(struct smb_sb_info *server) { int ret; spin_lock(&servers_lock); list_add(&server->entry, &smb_servers); VERBOSE("%p\n", server); ret = smbiod_start(); spin_unlock(&servers_lock); return 
ret; } /* * Unregister a server * Must be called with the server lock held. */ void smbiod_unregister_server(struct smb_sb_info *server) { spin_lock(&servers_lock); list_del_init(&server->entry); VERBOSE("%p\n", server); spin_unlock(&servers_lock); smbiod_wake_up(); smbiod_flush(server); } void smbiod_flush(struct smb_sb_info *server) { struct list_head *tmp, *n; struct smb_request *req; list_for_each_safe(tmp, n, &server->xmitq) { req = list_entry(tmp, struct smb_request, rq_queue); req->rq_errno = -EIO; list_del_init(&req->rq_queue); smb_rput(req); wake_up_interruptible(&req->rq_wait); } list_for_each_safe(tmp, n, &server->recvq) { req = list_entry(tmp, struct smb_request, rq_queue); req->rq_errno = -EIO; list_del_init(&req->rq_queue); smb_rput(req); wake_up_interruptible(&req->rq_wait); } } /* * Wake up smbmount and make it reconnect to the server. * This must be called with the server locked. * * FIXME: add smbconnect version to this */ int smbiod_retry(struct smb_sb_info *server) { struct list_head *head; struct smb_request *req; struct pid *pid = get_pid(server->conn_pid); int result = 0; VERBOSE("state: %d\n", server->state); if (server->state == CONN_VALID || server->state == CONN_RETRYING) goto out; smb_invalidate_inodes(server); /* * Some requests are meaningless after a retry, so we abort them. * One example are all requests using 'fileid' since the files are * closed on retry. 
*/ head = server->xmitq.next; while (head != &server->xmitq) { req = list_entry(head, struct smb_request, rq_queue); head = head->next; req->rq_bytes_sent = 0; if (req->rq_flags & SMB_REQ_NORETRY) { VERBOSE("aborting request %p on xmitq\n", req); req->rq_errno = -EIO; list_del_init(&req->rq_queue); smb_rput(req); wake_up_interruptible(&req->rq_wait); } } /* * FIXME: test the code for retrying request we already sent */ head = server->recvq.next; while (head != &server->recvq) { req = list_entry(head, struct smb_request, rq_queue); head = head->next; #if 0 if (req->rq_flags & SMB_REQ_RETRY) { /* must move the request to the xmitq */ VERBOSE("retrying request %p on recvq\n", req); list_move(&req->rq_queue, &server->xmitq); continue; } #endif VERBOSE("aborting request %p on recvq\n", req); /* req->rq_rcls = ???; */ /* FIXME: set smb error code too? */ req->rq_errno = -EIO; list_del_init(&req->rq_queue); smb_rput(req); wake_up_interruptible(&req->rq_wait); } smb_close_socket(server); if (!pid) { /* FIXME: this is fatal, umount? */ printk(KERN_ERR "smb_retry: no connection process\n"); server->state = CONN_RETRIED; goto out; } /* * Change state so that only one retry per server will be started. */ server->state = CONN_RETRYING; /* * Note: use the "priv" flag, as a user process may need to reconnect. */ result = kill_pid(pid, SIGUSR1, 1); if (result) { /* FIXME: this is most likely fatal, umount? */ printk(KERN_ERR "smb_retry: signal failed [%d]\n", result); goto out; } VERBOSE("signalled pid %d\n", pid_nr(pid)); /* FIXME: The retried requests should perhaps get a "time boost". */ out: put_pid(pid); return result; } /* * Currently handles lockingX packets. */ static void smbiod_handle_request(struct smb_sb_info *server) { PARANOIA("smbiod got a request ... and we don't implement oplocks!\n"); server->rstate = SMB_RECV_DROP; } /* * Do some IO for one server. 
*/ static void smbiod_doio(struct smb_sb_info *server) { int result; int maxwork = 7; if (server->state != CONN_VALID) goto out; do { result = smb_request_recv(server); if (result < 0) { server->state = CONN_INVALID; smbiod_retry(server); goto out; /* reconnecting is slow */ } else if (server->rstate == SMB_RECV_REQUEST) smbiod_handle_request(server); } while (result > 0 && maxwork-- > 0); /* * If there is more to read then we want to be sure to wake up again. */ if (server->state != CONN_VALID) goto out; if (smb_recv_available(server) > 0) set_bit(SMBIOD_DATA_READY, &smbiod_flags); do { result = smb_request_send_server(server); if (result < 0) { server->state = CONN_INVALID; smbiod_retry(server); goto out; /* reconnecting is slow */ } } while (result > 0); /* * If the last request was not sent out we want to wake up again. */ if (!list_empty(&server->xmitq)) set_bit(SMBIOD_DATA_READY, &smbiod_flags); out: return; } /* * smbiod kernel thread */ static int smbiod(void *unused) { VERBOSE("SMB Kernel thread starting (%d) ...\n", current->pid); for (;;) { struct smb_sb_info *server; struct list_head *pos, *n; /* FIXME: Use poll? */ wait_event_interruptible(smbiod_wait, test_bit(SMBIOD_DATA_READY, &smbiod_flags)); if (signal_pending(current)) { spin_lock(&servers_lock); smbiod_state = SMBIOD_DEAD; spin_unlock(&servers_lock); break; } clear_bit(SMBIOD_DATA_READY, &smbiod_flags); spin_lock(&servers_lock); if (list_empty(&smb_servers)) { smbiod_state = SMBIOD_DEAD; spin_unlock(&servers_lock); break; } list_for_each_safe(pos, n, &smb_servers) { server = list_entry(pos, struct smb_sb_info, entry); VERBOSE("checking server %p\n", server); if (server->state == CONN_VALID) { spin_unlock(&servers_lock); smb_lock_server(server); smbiod_doio(server); smb_unlock_server(server); spin_lock(&servers_lock); } } spin_unlock(&servers_lock); } VERBOSE("SMB Kernel thread exiting (%d) ...\n", current->pid); module_put_and_exit(0); }
gpl-2.0
adrian-bl-hox-jb/endeavoru-jb-crc-3.1.10-e8d900a_3.17.1136.3
arch/arm/mach-shmobile/pfc-sh73a0.c
2945
90336
/* * sh73a0 processor support - PFC hardware block * * Copyright (C) 2010 Renesas Solutions Corp. * Copyright (C) 2010 NISHIMOTO Hiroki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <mach/sh73a0.h> #define _1(fn, pfx, sfx) fn(pfx, sfx) #define _10(fn, pfx, sfx) \ _1(fn, pfx##0, sfx), _1(fn, pfx##1, sfx), \ _1(fn, pfx##2, sfx), _1(fn, pfx##3, sfx), \ _1(fn, pfx##4, sfx), _1(fn, pfx##5, sfx), \ _1(fn, pfx##6, sfx), _1(fn, pfx##7, sfx), \ _1(fn, pfx##8, sfx), _1(fn, pfx##9, sfx) #define _310(fn, pfx, sfx) \ _10(fn, pfx, sfx), _10(fn, pfx##1, sfx), \ _10(fn, pfx##2, sfx), _10(fn, pfx##3, sfx), \ _10(fn, pfx##4, sfx), _10(fn, pfx##5, sfx), \ _10(fn, pfx##6, sfx), _10(fn, pfx##7, sfx), \ _10(fn, pfx##8, sfx), _10(fn, pfx##9, sfx), \ _10(fn, pfx##10, sfx), \ _1(fn, pfx##110, sfx), _1(fn, pfx##111, sfx), \ _1(fn, pfx##112, sfx), _1(fn, pfx##113, sfx), \ _1(fn, pfx##114, sfx), _1(fn, pfx##115, sfx), \ _1(fn, pfx##116, sfx), _1(fn, pfx##117, sfx), \ _1(fn, pfx##118, sfx), \ _1(fn, pfx##128, sfx), _1(fn, pfx##129, sfx), \ _10(fn, pfx##13, sfx), _10(fn, pfx##14, sfx), \ _10(fn, pfx##15, sfx), \ _1(fn, pfx##160, sfx), _1(fn, pfx##161, sfx), \ _1(fn, pfx##162, sfx), _1(fn, pfx##163, sfx), \ _1(fn, pfx##164, sfx), \ _1(fn, pfx##192, sfx), _1(fn, pfx##193, sfx), \ _1(fn, pfx##194, sfx), _1(fn, 
pfx##195, sfx), \ _1(fn, pfx##196, sfx), _1(fn, pfx##197, sfx), \ _1(fn, pfx##198, sfx), _1(fn, pfx##199, sfx), \ _10(fn, pfx##20, sfx), _10(fn, pfx##21, sfx), \ _10(fn, pfx##22, sfx), _10(fn, pfx##23, sfx), \ _10(fn, pfx##24, sfx), _10(fn, pfx##25, sfx), \ _10(fn, pfx##26, sfx), _10(fn, pfx##27, sfx), \ _1(fn, pfx##280, sfx), _1(fn, pfx##281, sfx), \ _1(fn, pfx##282, sfx), \ _1(fn, pfx##288, sfx), _1(fn, pfx##289, sfx), \ _10(fn, pfx##29, sfx), _10(fn, pfx##30, sfx) #define _PORT(pfx, sfx) pfx##_##sfx #define PORT_310(str) _310(_PORT, PORT, str) enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PORT_310(DATA), /* PORT0_DATA -> PORT309_DATA */ PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PORT_310(IN), /* PORT0_IN -> PORT309_IN */ PINMUX_INPUT_END, PINMUX_INPUT_PULLUP_BEGIN, PORT_310(IN_PU), /* PORT0_IN_PU -> PORT309_IN_PU */ PINMUX_INPUT_PULLUP_END, PINMUX_INPUT_PULLDOWN_BEGIN, PORT_310(IN_PD), /* PORT0_IN_PD -> PORT309_IN_PD */ PINMUX_INPUT_PULLDOWN_END, PINMUX_OUTPUT_BEGIN, PORT_310(OUT), /* PORT0_OUT -> PORT309_OUT */ PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PORT_310(FN_IN), /* PORT0_FN_IN -> PORT309_FN_IN */ PORT_310(FN_OUT), /* PORT0_FN_OUT -> PORT309_FN_OUT */ PORT_310(FN0), /* PORT0_FN0 -> PORT309_FN0 */ PORT_310(FN1), /* PORT0_FN1 -> PORT309_FN1 */ PORT_310(FN2), /* PORT0_FN2 -> PORT309_FN2 */ PORT_310(FN3), /* PORT0_FN3 -> PORT309_FN3 */ PORT_310(FN4), /* PORT0_FN4 -> PORT309_FN4 */ PORT_310(FN5), /* PORT0_FN5 -> PORT309_FN5 */ PORT_310(FN6), /* PORT0_FN6 -> PORT309_FN6 */ PORT_310(FN7), /* PORT0_FN7 -> PORT309_FN7 */ MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1, MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1, MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1, MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1, MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1, MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1, MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1, MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1, MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1, MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1, MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1, MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1, MSEL2CR_MSEL6_0, 
MSEL2CR_MSEL6_1, MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1, MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1, MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1, MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1, MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1, MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1, MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1, MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1, MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1, MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1, MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1, MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1, MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1, MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1, MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1, MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1, MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1, MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1, MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1, MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1, MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1, MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1, MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1, MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1, MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1, MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1, MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1, MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, /* Hardware manual Table 25-1 (Function 0-7) */ VBUS_0_MARK, GPI0_MARK, GPI1_MARK, GPI2_MARK, GPI3_MARK, GPI4_MARK, GPI5_MARK, GPI6_MARK, GPI7_MARK, SCIFA7_RXD_MARK, SCIFA7_CTS__MARK, GPO7_MARK, MFG0_OUT2_MARK, GPO6_MARK, MFG1_OUT2_MARK, GPO5_MARK, SCIFA0_SCK_MARK, FSICOSLDT3_MARK, PORT16_VIO_CKOR_MARK, SCIFA0_TXD_MARK, SCIFA7_TXD_MARK, SCIFA7_RTS__MARK, PORT19_VIO_CKO2_MARK, GPO0_MARK, GPO1_MARK, GPO2_MARK, STATUS0_MARK, GPO3_MARK, STATUS1_MARK, GPO4_MARK, STATUS2_MARK, VINT_MARK, TCKON_MARK, XDVFS1_MARK, PORT27_I2C_SCL2_MARK, PORT27_I2C_SCL3_MARK, \ MFG0_OUT1_MARK, PORT27_IROUT_MARK, XDVFS2_MARK, PORT28_I2C_SDA2_MARK, PORT28_I2C_SDA3_MARK, \ PORT28_TPU1TO1_MARK, SIM_RST_MARK, PORT29_TPU1TO1_MARK, SIM_CLK_MARK, PORT30_VIO_CKOR_MARK, SIM_D_MARK, PORT31_IROUT_MARK, SCIFA4_TXD_MARK, SCIFA4_RXD_MARK, XWUP_MARK, SCIFA4_RTS__MARK, SCIFA4_CTS__MARK, FSIBOBT_MARK, FSIBIBT_MARK, FSIBOLR_MARK, FSIBILR_MARK, FSIBOSLD_MARK, 
FSIBISLD_MARK, VACK_MARK, XTAL1L_MARK, SCIFA0_RTS__MARK, FSICOSLDT2_MARK, SCIFA0_RXD_MARK, SCIFA0_CTS__MARK, FSICOSLDT1_MARK, FSICOBT_MARK, FSICIBT_MARK, FSIDOBT_MARK, FSIDIBT_MARK, FSICOLR_MARK, FSICILR_MARK, FSIDOLR_MARK, FSIDILR_MARK, FSICOSLD_MARK, PORT47_FSICSPDIF_MARK, FSICISLD_MARK, FSIDISLD_MARK, FSIACK_MARK, PORT49_IRDA_OUT_MARK, PORT49_IROUT_MARK, FSIAOMC_MARK, FSIAOLR_MARK, BBIF2_TSYNC2_MARK, TPU2TO2_MARK, FSIAILR_MARK, FSIAOBT_MARK, BBIF2_TSCK2_MARK, TPU2TO3_MARK, FSIAIBT_MARK, FSIAOSLD_MARK, BBIF2_TXD2_MARK, FSIASPDIF_MARK, PORT53_IRDA_IN_MARK, TPU3TO3_MARK, FSIBSPDIF_MARK, \ PORT53_FSICSPDIF_MARK, FSIBCK_MARK, PORT54_IRDA_FIRSEL_MARK, TPU3TO2_MARK, FSIBOMC_MARK, \ FSICCK_MARK, FSICOMC_MARK, FSIAISLD_MARK, TPU0TO0_MARK, A0_MARK, BS__MARK, A12_MARK, PORT58_KEYOUT7_MARK, TPU4TO2_MARK, A13_MARK, PORT59_KEYOUT6_MARK, TPU0TO1_MARK, A14_MARK, KEYOUT5_MARK, A15_MARK, KEYOUT4_MARK, A16_MARK, KEYOUT3_MARK, MSIOF0_SS1_MARK, A17_MARK, KEYOUT2_MARK, MSIOF0_TSYNC_MARK, A18_MARK, KEYOUT1_MARK, MSIOF0_TSCK_MARK, A19_MARK, KEYOUT0_MARK, MSIOF0_TXD_MARK, A20_MARK, KEYIN0_MARK, MSIOF0_RSCK_MARK, A21_MARK, KEYIN1_MARK, MSIOF0_RSYNC_MARK, A22_MARK, KEYIN2_MARK, MSIOF0_MCK0_MARK, A23_MARK, KEYIN3_MARK, MSIOF0_MCK1_MARK, A24_MARK, KEYIN4_MARK, MSIOF0_RXD_MARK, A25_MARK, KEYIN5_MARK, MSIOF0_SS2_MARK, A26_MARK, KEYIN6_MARK, KEYIN7_MARK, D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, D3_NAF3_MARK, D4_NAF4_MARK, D5_NAF5_MARK, D6_NAF6_MARK, D7_NAF7_MARK, D8_NAF8_MARK, D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK, D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, D15_NAF15_MARK, CS4__MARK, CS5A__MARK, PORT91_RDWR_MARK, CS5B__MARK, FCE1__MARK, CS6B__MARK, DACK0_MARK, FCE0__MARK, CS6A__MARK, WAIT__MARK, DREQ0_MARK, RD__FSC_MARK, WE0__FWE_MARK, RDWR_FWE_MARK, WE1__MARK, FRB_MARK, CKO_MARK, NBRSTOUT__MARK, NBRST__MARK, BBIF2_TXD_MARK, BBIF2_RXD_MARK, BBIF2_SYNC_MARK, BBIF2_SCK_MARK, SCIFA3_CTS__MARK, MFG3_IN2_MARK, SCIFA3_RXD_MARK, MFG3_IN1_MARK, BBIF1_SS2_MARK, SCIFA3_RTS__MARK, 
MFG3_OUT1_MARK, SCIFA3_TXD_MARK, HSI_RX_DATA_MARK, BBIF1_RXD_MARK, HSI_TX_WAKE_MARK, BBIF1_TSCK_MARK, HSI_TX_DATA_MARK, BBIF1_TSYNC_MARK, HSI_TX_READY_MARK, BBIF1_TXD_MARK, HSI_RX_READY_MARK, BBIF1_RSCK_MARK, PORT115_I2C_SCL2_MARK, \ PORT115_I2C_SCL3_MARK, HSI_RX_WAKE_MARK, BBIF1_RSYNC_MARK, PORT116_I2C_SDA2_MARK, \ PORT116_I2C_SDA3_MARK, HSI_RX_FLAG_MARK, BBIF1_SS1_MARK, BBIF1_FLOW_MARK, HSI_TX_FLAG_MARK, VIO_VD_MARK, PORT128_LCD2VSYN_MARK, VIO2_VD_MARK, LCD2D0_MARK, VIO_HD_MARK, PORT129_LCD2HSYN_MARK, PORT129_LCD2CS__MARK, \ VIO2_HD_MARK, LCD2D1_MARK, VIO_D0_MARK, PORT130_MSIOF2_RXD_MARK, LCD2D10_MARK, VIO_D1_MARK, PORT131_KEYOUT6_MARK, PORT131_MSIOF2_SS1_MARK, \ PORT131_KEYOUT11_MARK, LCD2D11_MARK, VIO_D2_MARK, PORT132_KEYOUT7_MARK, PORT132_MSIOF2_SS2_MARK, \ PORT132_KEYOUT10_MARK, LCD2D12_MARK, VIO_D3_MARK, MSIOF2_TSYNC_MARK, LCD2D13_MARK, VIO_D4_MARK, MSIOF2_TXD_MARK, LCD2D14_MARK, VIO_D5_MARK, MSIOF2_TSCK_MARK, LCD2D15_MARK, VIO_D6_MARK, PORT136_KEYOUT8_MARK, LCD2D16_MARK, VIO_D7_MARK, PORT137_KEYOUT9_MARK, LCD2D17_MARK, VIO_D8_MARK, PORT138_KEYOUT8_MARK, VIO2_D0_MARK, LCD2D6_MARK, VIO_D9_MARK, PORT139_KEYOUT9_MARK, VIO2_D1_MARK, LCD2D7_MARK, VIO_D10_MARK, TPU0TO2_MARK, VIO2_D2_MARK, LCD2D8_MARK, VIO_D11_MARK, TPU0TO3_MARK, VIO2_D3_MARK, LCD2D9_MARK, VIO_D12_MARK, PORT142_KEYOUT10_MARK, VIO2_D4_MARK, LCD2D2_MARK, VIO_D13_MARK, PORT143_KEYOUT11_MARK, PORT143_KEYOUT6_MARK, \ VIO2_D5_MARK, LCD2D3_MARK, VIO_D14_MARK, PORT144_KEYOUT7_MARK, VIO2_D6_MARK, LCD2D4_MARK, VIO_D15_MARK, TPU1TO3_MARK, PORT145_LCD2DISP_MARK, \ PORT145_LCD2RS_MARK, VIO2_D7_MARK, LCD2D5_MARK, VIO_CLK_MARK, LCD2DCK_MARK, PORT146_LCD2WR__MARK, VIO2_CLK_MARK, \ LCD2D18_MARK, VIO_FIELD_MARK, LCD2RD__MARK, VIO2_FIELD_MARK, LCD2D19_MARK, VIO_CKO_MARK, A27_MARK, PORT149_RDWR_MARK, MFG0_IN1_MARK, PORT149_KEYOUT9_MARK, MFG0_IN2_MARK, TS_SPSYNC3_MARK, MSIOF2_RSCK_MARK, TS_SDAT3_MARK, MSIOF2_RSYNC_MARK, TPU1TO2_MARK, TS_SDEN3_MARK, PORT153_MSIOF2_SS1_MARK, SCIFA2_TXD1_MARK, MSIOF2_MCK0_MARK, 
SCIFA2_RXD1_MARK, MSIOF2_MCK1_MARK, SCIFA2_RTS1__MARK, PORT156_MSIOF2_SS2_MARK, SCIFA2_CTS1__MARK, PORT157_MSIOF2_RXD_MARK, DINT__MARK, SCIFA2_SCK1_MARK, TS_SCK3_MARK, PORT159_SCIFB_SCK_MARK, PORT159_SCIFA5_SCK_MARK, NMI_MARK, PORT160_SCIFB_TXD_MARK, PORT160_SCIFA5_TXD_MARK, PORT161_SCIFB_CTS__MARK, PORT161_SCIFA5_CTS__MARK, PORT162_SCIFB_RXD_MARK, PORT162_SCIFA5_RXD_MARK, PORT163_SCIFB_RTS__MARK, PORT163_SCIFA5_RTS__MARK, TPU3TO0_MARK, LCDD0_MARK, LCDD1_MARK, PORT193_SCIFA5_CTS__MARK, BBIF2_TSYNC1_MARK, LCDD2_MARK, PORT194_SCIFA5_RTS__MARK, BBIF2_TSCK1_MARK, LCDD3_MARK, PORT195_SCIFA5_RXD_MARK, BBIF2_TXD1_MARK, LCDD4_MARK, PORT196_SCIFA5_TXD_MARK, LCDD5_MARK, PORT197_SCIFA5_SCK_MARK, MFG2_OUT2_MARK, TPU2TO1_MARK, LCDD6_MARK, LCDD7_MARK, TPU4TO1_MARK, MFG4_OUT2_MARK, LCDD8_MARK, D16_MARK, LCDD9_MARK, D17_MARK, LCDD10_MARK, D18_MARK, LCDD11_MARK, D19_MARK, LCDD12_MARK, D20_MARK, LCDD13_MARK, D21_MARK, LCDD14_MARK, D22_MARK, LCDD15_MARK, PORT207_MSIOF0L_SS1_MARK, D23_MARK, LCDD16_MARK, PORT208_MSIOF0L_SS2_MARK, D24_MARK, LCDD17_MARK, D25_MARK, LCDD18_MARK, DREQ2_MARK, PORT210_MSIOF0L_SS1_MARK, D26_MARK, LCDD19_MARK, PORT211_MSIOF0L_SS2_MARK, D27_MARK, LCDD20_MARK, TS_SPSYNC1_MARK, MSIOF0L_MCK0_MARK, D28_MARK, LCDD21_MARK, TS_SDAT1_MARK, MSIOF0L_MCK1_MARK, D29_MARK, LCDD22_MARK, TS_SDEN1_MARK, MSIOF0L_RSCK_MARK, D30_MARK, LCDD23_MARK, TS_SCK1_MARK, MSIOF0L_RSYNC_MARK, D31_MARK, LCDDCK_MARK, LCDWR__MARK, LCDRD__MARK, DACK2_MARK, PORT217_LCD2RS_MARK, MSIOF0L_TSYNC_MARK, \ VIO2_FIELD3_MARK, PORT217_LCD2DISP_MARK, LCDHSYN_MARK, LCDCS__MARK, LCDCS2__MARK, DACK3_MARK, \ PORT218_VIO_CKOR_MARK, LCDDISP_MARK, LCDRS_MARK, PORT219_LCD2WR__MARK, DREQ3_MARK, \ MSIOF0L_TSCK_MARK, VIO2_CLK3_MARK, LCD2DCK_2_MARK, LCDVSYN_MARK, LCDVSYN2_MARK, LCDLCLK_MARK, DREQ1_MARK, PORT221_LCD2CS__MARK, PWEN_MARK, \ MSIOF0L_RXD_MARK, VIO2_HD3_MARK, PORT221_LCD2HSYN_MARK, LCDDON_MARK, LCDDON2_MARK, DACK1_MARK, OVCN_MARK, MSIOF0L_TXD_MARK, \ VIO2_VD3_MARK, PORT222_LCD2VSYN_MARK, SCIFA1_TXD_MARK, 
OVCN2_MARK, EXTLP_MARK, SCIFA1_SCK_MARK, PORT226_VIO_CKO2_MARK, SCIFA1_RTS__MARK, IDIN_MARK, SCIFA1_RXD_MARK, SCIFA1_CTS__MARK, MFG1_IN1_MARK, MSIOF1_TXD_MARK, SCIFA2_TXD2_MARK, MSIOF1_TSYNC_MARK, SCIFA2_CTS2__MARK, MSIOF1_TSCK_MARK, SCIFA2_SCK2_MARK, MSIOF1_RXD_MARK, SCIFA2_RXD2_MARK, MSIOF1_RSCK_MARK, SCIFA2_RTS2__MARK, VIO2_CLK2_MARK, LCD2D20_MARK, MSIOF1_RSYNC_MARK, MFG1_IN2_MARK, VIO2_VD2_MARK, LCD2D21_MARK, MSIOF1_MCK0_MARK, PORT236_I2C_SDA2_MARK, MSIOF1_MCK1_MARK, PORT237_I2C_SCL2_MARK, MSIOF1_SS1_MARK, VIO2_FIELD2_MARK, LCD2D22_MARK, MSIOF1_SS2_MARK, VIO2_HD2_MARK, LCD2D23_MARK, SCIFA6_TXD_MARK, PORT241_IRDA_OUT_MARK, PORT241_IROUT_MARK, MFG4_OUT1_MARK, TPU4TO0_MARK, PORT242_IRDA_IN_MARK, MFG4_IN2_MARK, PORT243_IRDA_FIRSEL_MARK, PORT243_VIO_CKO2_MARK, PORT244_SCIFA5_CTS__MARK, MFG2_IN1_MARK, PORT244_SCIFB_CTS__MARK, \ MSIOF2R_RXD_MARK, PORT245_SCIFA5_RTS__MARK, MFG2_IN2_MARK, PORT245_SCIFB_RTS__MARK, \ MSIOF2R_TXD_MARK, PORT246_SCIFA5_RXD_MARK, MFG1_OUT1_MARK, PORT246_SCIFB_RXD_MARK, \ TPU1TO0_MARK, PORT247_SCIFA5_TXD_MARK, MFG3_OUT2_MARK, PORT247_SCIFB_TXD_MARK, \ TPU3TO1_MARK, PORT248_SCIFA5_SCK_MARK, MFG2_OUT1_MARK, PORT248_SCIFB_SCK_MARK, \ TPU2TO0_MARK, PORT248_I2C_SCL3_MARK, MSIOF2R_TSCK_MARK, PORT249_IROUT_MARK, MFG4_IN1_MARK, PORT249_I2C_SDA3_MARK, \ MSIOF2R_TSYNC_MARK, SDHICLK0_MARK, SDHICD0_MARK, SDHID0_0_MARK, SDHID0_1_MARK, SDHID0_2_MARK, SDHID0_3_MARK, SDHICMD0_MARK, SDHIWP0_MARK, SDHICLK1_MARK, SDHID1_0_MARK, TS_SPSYNC2_MARK, SDHID1_1_MARK, TS_SDAT2_MARK, SDHID1_2_MARK, TS_SDEN2_MARK, SDHID1_3_MARK, TS_SCK2_MARK, SDHICMD1_MARK, SDHICLK2_MARK, SDHID2_0_MARK, TS_SPSYNC4_MARK, SDHID2_1_MARK, TS_SDAT4_MARK, SDHID2_2_MARK, TS_SDEN4_MARK, SDHID2_3_MARK, TS_SCK4_MARK, SDHICMD2_MARK, MMCCLK0_MARK, MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK, MMCD0_4_MARK, TS_SPSYNC5_MARK, MMCD0_5_MARK, TS_SDAT5_MARK, MMCD0_6_MARK, TS_SDEN5_MARK, MMCD0_7_MARK, TS_SCK5_MARK, MMCCMD0_MARK, RESETOUTS__MARK, EXTAL2OUT_MARK, MCP_WAIT__MCP_FRB_MARK, MCP_CKO_MARK, 
MMCCLK1_MARK, MCP_D15_MCP_NAF15_MARK, MCP_D14_MCP_NAF14_MARK, MCP_D13_MCP_NAF13_MARK, MCP_D12_MCP_NAF12_MARK, MCP_D11_MCP_NAF11_MARK, MCP_D10_MCP_NAF10_MARK, MCP_D9_MCP_NAF9_MARK, MCP_D8_MCP_NAF8_MARK, MMCCMD1_MARK, MCP_D7_MCP_NAF7_MARK, MMCD1_7_MARK, MCP_D6_MCP_NAF6_MARK, MMCD1_6_MARK, MCP_D5_MCP_NAF5_MARK, MMCD1_5_MARK, MCP_D4_MCP_NAF4_MARK, MMCD1_4_MARK, MCP_D3_MCP_NAF3_MARK, MMCD1_3_MARK, MCP_D2_MCP_NAF2_MARK, MMCD1_2_MARK, MCP_D1_MCP_NAF1_MARK, MMCD1_1_MARK, MCP_D0_MCP_NAF0_MARK, MMCD1_0_MARK, MCP_NBRSTOUT__MARK, MCP_WE0__MCP_FWE_MARK, MCP_RDWR_MCP_FWE_MARK, /* MSEL2 special cases */ TSIF2_TS_XX1_MARK, TSIF2_TS_XX2_MARK, TSIF2_TS_XX3_MARK, TSIF2_TS_XX4_MARK, TSIF2_TS_XX5_MARK, TSIF1_TS_XX1_MARK, TSIF1_TS_XX2_MARK, TSIF1_TS_XX3_MARK, TSIF1_TS_XX4_MARK, TSIF1_TS_XX5_MARK, TSIF0_TS_XX1_MARK, TSIF0_TS_XX2_MARK, TSIF0_TS_XX3_MARK, TSIF0_TS_XX4_MARK, TSIF0_TS_XX5_MARK, MST1_TS_XX1_MARK, MST1_TS_XX2_MARK, MST1_TS_XX3_MARK, MST1_TS_XX4_MARK, MST1_TS_XX5_MARK, MST0_TS_XX1_MARK, MST0_TS_XX2_MARK, MST0_TS_XX3_MARK, MST0_TS_XX4_MARK, MST0_TS_XX5_MARK, /* MSEL3 special cases */ SDHI0_VCCQ_MC0_ON_MARK, SDHI0_VCCQ_MC0_OFF_MARK, DEBUG_MON_VIO_MARK, DEBUG_MON_LCDD_MARK, LCDC_LCDC0_MARK, LCDC_LCDC1_MARK, /* MSEL4 special cases */ IRQ9_MEM_INT_MARK, IRQ9_MCP_INT_MARK, A11_MARK, KEYOUT8_MARK, TPU4TO3_MARK, RESETA_N_PU_ON_MARK, RESETA_N_PU_OFF_MARK, EDBGREQ_PD_MARK, EDBGREQ_PU_MARK, /* Functions with pull-ups */ KEYIN0_PU_MARK, KEYIN1_PU_MARK, KEYIN2_PU_MARK, KEYIN3_PU_MARK, KEYIN4_PU_MARK, KEYIN5_PU_MARK, KEYIN6_PU_MARK, KEYIN7_PU_MARK, SDHID1_0_PU_MARK, SDHID1_1_PU_MARK, SDHID1_2_PU_MARK, SDHID1_3_PU_MARK, SDHICMD1_PU_MARK, MMCCMD0_PU_MARK, MMCCMD1_PU_MARK, FSIACK_PU_MARK, FSIAILR_PU_MARK, FSIAIBT_PU_MARK, FSIAISLD_PU_MARK, PINMUX_MARK_END, }; #define PORT_DATA_I(nr) \ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN) #define PORT_DATA_I_PD(nr) \ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ PORT##nr##_IN, PORT##nr##_IN_PD) #define PORT_DATA_I_PU(nr) \ 
PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ PORT##nr##_IN, PORT##nr##_IN_PU) #define PORT_DATA_I_PU_PD(nr) \ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ PORT##nr##_IN, PORT##nr##_IN_PD, \ PORT##nr##_IN_PU) #define PORT_DATA_O(nr) \ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ PORT##nr##_OUT) #define PORT_DATA_IO(nr) \ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ PORT##nr##_OUT, PORT##nr##_IN) #define PORT_DATA_IO_PD(nr) \ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ PORT##nr##_OUT, PORT##nr##_IN, \ PORT##nr##_IN_PD) #define PORT_DATA_IO_PU(nr) \ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ PORT##nr##_OUT, PORT##nr##_IN, \ PORT##nr##_IN_PU) #define PORT_DATA_IO_PU_PD(nr) \ PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \ PORT##nr##_OUT, PORT##nr##_IN, \ PORT##nr##_IN_PD, PORT##nr##_IN_PU) static pinmux_enum_t pinmux_data[] = { /* specify valid pin states for each pin in GPIO mode */ /* Table 25-1 (I/O and Pull U/D) */ PORT_DATA_I_PD(0), PORT_DATA_I_PU(1), PORT_DATA_I_PU(2), PORT_DATA_I_PU(3), PORT_DATA_I_PU(4), PORT_DATA_I_PU(5), PORT_DATA_I_PU(6), PORT_DATA_I_PU(7), PORT_DATA_I_PU(8), PORT_DATA_I_PD(9), PORT_DATA_I_PD(10), PORT_DATA_I_PU_PD(11), PORT_DATA_IO_PU_PD(12), PORT_DATA_IO_PU_PD(13), PORT_DATA_IO_PU_PD(14), PORT_DATA_IO_PU_PD(15), PORT_DATA_IO_PD(16), PORT_DATA_IO_PD(17), PORT_DATA_IO_PU(18), PORT_DATA_IO_PU(19), PORT_DATA_O(20), PORT_DATA_O(21), PORT_DATA_O(22), PORT_DATA_O(23), PORT_DATA_O(24), PORT_DATA_I_PD(25), PORT_DATA_I_PD(26), PORT_DATA_IO_PU(27), PORT_DATA_IO_PU(28), PORT_DATA_IO_PD(29), PORT_DATA_IO_PD(30), PORT_DATA_IO_PU(31), PORT_DATA_IO_PD(32), PORT_DATA_I_PU_PD(33), PORT_DATA_IO_PD(34), PORT_DATA_I_PU_PD(35), PORT_DATA_IO_PD(36), PORT_DATA_IO(37), PORT_DATA_O(38), PORT_DATA_I_PU(39), PORT_DATA_I_PU_PD(40), PORT_DATA_O(41), PORT_DATA_IO_PD(42), PORT_DATA_IO_PU_PD(43), PORT_DATA_IO_PU_PD(44), PORT_DATA_IO_PD(45), PORT_DATA_IO_PD(46), PORT_DATA_IO_PD(47), PORT_DATA_I_PD(48), PORT_DATA_IO_PU_PD(49), PORT_DATA_IO_PD(50), 
PORT_DATA_IO_PD(51), PORT_DATA_O(52), PORT_DATA_IO_PU_PD(53), PORT_DATA_IO_PU_PD(54), PORT_DATA_IO_PD(55), PORT_DATA_I_PU_PD(56), PORT_DATA_IO(57), PORT_DATA_IO(58), PORT_DATA_IO(59), PORT_DATA_IO(60), PORT_DATA_IO(61), PORT_DATA_IO_PD(62), PORT_DATA_IO_PD(63), PORT_DATA_IO_PU_PD(64), PORT_DATA_IO_PD(65), PORT_DATA_IO_PU_PD(66), PORT_DATA_IO_PU_PD(67), PORT_DATA_IO_PU_PD(68), PORT_DATA_IO_PU_PD(69), PORT_DATA_IO_PU_PD(70), PORT_DATA_IO_PU_PD(71), PORT_DATA_IO_PU_PD(72), PORT_DATA_I_PU_PD(73), PORT_DATA_IO_PU(74), PORT_DATA_IO_PU(75), PORT_DATA_IO_PU(76), PORT_DATA_IO_PU(77), PORT_DATA_IO_PU(78), PORT_DATA_IO_PU(79), PORT_DATA_IO_PU(80), PORT_DATA_IO_PU(81), PORT_DATA_IO_PU(82), PORT_DATA_IO_PU(83), PORT_DATA_IO_PU(84), PORT_DATA_IO_PU(85), PORT_DATA_IO_PU(86), PORT_DATA_IO_PU(87), PORT_DATA_IO_PU(88), PORT_DATA_IO_PU(89), PORT_DATA_O(90), PORT_DATA_IO_PU(91), PORT_DATA_O(92), PORT_DATA_IO_PU(93), PORT_DATA_O(94), PORT_DATA_I_PU_PD(95), PORT_DATA_IO(96), PORT_DATA_IO(97), PORT_DATA_IO(98), PORT_DATA_I_PU(99), PORT_DATA_O(100), PORT_DATA_O(101), PORT_DATA_I_PU(102), PORT_DATA_IO_PD(103), PORT_DATA_I_PU_PD(104), PORT_DATA_I_PD(105), PORT_DATA_I_PD(106), PORT_DATA_I_PU_PD(107), PORT_DATA_I_PU_PD(108), PORT_DATA_IO_PD(109), PORT_DATA_IO_PD(110), PORT_DATA_IO_PU_PD(111), PORT_DATA_IO_PU_PD(112), PORT_DATA_IO_PU_PD(113), PORT_DATA_IO_PD(114), PORT_DATA_IO_PU(115), PORT_DATA_IO_PU(116), PORT_DATA_IO_PU_PD(117), PORT_DATA_IO_PU_PD(118), PORT_DATA_IO_PD(128), PORT_DATA_IO_PD(129), PORT_DATA_IO_PU_PD(130), PORT_DATA_IO_PD(131), PORT_DATA_IO_PD(132), PORT_DATA_IO_PD(133), PORT_DATA_IO_PU_PD(134), PORT_DATA_IO_PU_PD(135), PORT_DATA_IO_PU_PD(136), PORT_DATA_IO_PU_PD(137), PORT_DATA_IO_PD(138), PORT_DATA_IO_PD(139), PORT_DATA_IO_PD(140), PORT_DATA_IO_PD(141), PORT_DATA_IO_PD(142), PORT_DATA_IO_PD(143), PORT_DATA_IO_PU_PD(144), PORT_DATA_IO_PD(145), PORT_DATA_IO_PU_PD(146), PORT_DATA_IO_PU_PD(147), PORT_DATA_IO_PU_PD(148), PORT_DATA_IO_PU_PD(149), PORT_DATA_I_PU_PD(150), 
PORT_DATA_IO_PU_PD(151), PORT_DATA_IO_PU_PD(152), PORT_DATA_IO_PD(153), PORT_DATA_IO_PD(154), PORT_DATA_I_PU_PD(155), PORT_DATA_IO_PU_PD(156), PORT_DATA_I_PD(157), PORT_DATA_IO_PD(158), PORT_DATA_IO_PU_PD(159), PORT_DATA_IO_PU_PD(160), PORT_DATA_I_PU_PD(161), PORT_DATA_I_PU_PD(162), PORT_DATA_IO_PU_PD(163), PORT_DATA_I_PU_PD(164), PORT_DATA_IO_PD(192), PORT_DATA_IO_PU_PD(193), PORT_DATA_IO_PD(194), PORT_DATA_IO_PU_PD(195), PORT_DATA_IO_PD(196), PORT_DATA_IO_PD(197), PORT_DATA_IO_PD(198), PORT_DATA_IO_PD(199), PORT_DATA_IO_PU_PD(200), PORT_DATA_IO_PU_PD(201), PORT_DATA_IO_PU_PD(202), PORT_DATA_IO_PU_PD(203), PORT_DATA_IO_PU_PD(204), PORT_DATA_IO_PU_PD(205), PORT_DATA_IO_PU_PD(206), PORT_DATA_IO_PD(207), PORT_DATA_IO_PD(208), PORT_DATA_IO_PD(209), PORT_DATA_IO_PD(210), PORT_DATA_IO_PD(211), PORT_DATA_IO_PD(212), PORT_DATA_IO_PD(213), PORT_DATA_IO_PU_PD(214), PORT_DATA_IO_PU_PD(215), PORT_DATA_IO_PD(216), PORT_DATA_IO_PD(217), PORT_DATA_O(218), PORT_DATA_IO_PD(219), PORT_DATA_IO_PD(220), PORT_DATA_IO_PU_PD(221), PORT_DATA_IO_PU_PD(222), PORT_DATA_I_PU_PD(223), PORT_DATA_I_PU_PD(224), PORT_DATA_IO_PU_PD(225), PORT_DATA_O(226), PORT_DATA_IO_PU_PD(227), PORT_DATA_I_PU_PD(228), PORT_DATA_I_PD(229), PORT_DATA_IO(230), PORT_DATA_IO_PU_PD(231), PORT_DATA_IO_PU_PD(232), PORT_DATA_I_PU_PD(233), PORT_DATA_IO_PU_PD(234), PORT_DATA_IO_PU_PD(235), PORT_DATA_IO_PU_PD(236), PORT_DATA_IO_PD(237), PORT_DATA_IO_PU_PD(238), PORT_DATA_IO_PU_PD(239), PORT_DATA_IO_PU_PD(240), PORT_DATA_O(241), PORT_DATA_I_PD(242), PORT_DATA_IO_PU_PD(243), PORT_DATA_IO_PU_PD(244), PORT_DATA_IO_PU_PD(245), PORT_DATA_IO_PU_PD(246), PORT_DATA_IO_PU_PD(247), PORT_DATA_IO_PU_PD(248), PORT_DATA_IO_PU_PD(249), PORT_DATA_IO_PU_PD(250), PORT_DATA_IO_PU_PD(251), PORT_DATA_IO_PU_PD(252), PORT_DATA_IO_PU_PD(253), PORT_DATA_IO_PU_PD(254), PORT_DATA_IO_PU_PD(255), PORT_DATA_IO_PU_PD(256), PORT_DATA_IO_PU_PD(257), PORT_DATA_IO_PU_PD(258), PORT_DATA_IO_PU_PD(259), PORT_DATA_IO_PU_PD(260), PORT_DATA_IO_PU_PD(261), 
PORT_DATA_IO_PU_PD(262), PORT_DATA_IO_PU_PD(263), PORT_DATA_IO_PU_PD(264), PORT_DATA_IO_PU_PD(265), PORT_DATA_IO_PU_PD(266), PORT_DATA_IO_PU_PD(267), PORT_DATA_IO_PU_PD(268), PORT_DATA_IO_PU_PD(269), PORT_DATA_IO_PU_PD(270), PORT_DATA_IO_PU_PD(271), PORT_DATA_IO_PU_PD(272), PORT_DATA_IO_PU_PD(273), PORT_DATA_IO_PU_PD(274), PORT_DATA_IO_PU_PD(275), PORT_DATA_IO_PU_PD(276), PORT_DATA_IO_PU_PD(277), PORT_DATA_IO_PU_PD(278), PORT_DATA_IO_PU_PD(279), PORT_DATA_IO_PU_PD(280), PORT_DATA_O(281), PORT_DATA_O(282), PORT_DATA_I_PU(288), PORT_DATA_IO_PU_PD(289), PORT_DATA_IO_PU_PD(290), PORT_DATA_IO_PU_PD(291), PORT_DATA_IO_PU_PD(292), PORT_DATA_IO_PU_PD(293), PORT_DATA_IO_PU_PD(294), PORT_DATA_IO_PU_PD(295), PORT_DATA_IO_PU_PD(296), PORT_DATA_IO_PU_PD(297), PORT_DATA_IO_PU_PD(298), PORT_DATA_IO_PU_PD(299), PORT_DATA_IO_PU_PD(300), PORT_DATA_IO_PU_PD(301), PORT_DATA_IO_PU_PD(302), PORT_DATA_IO_PU_PD(303), PORT_DATA_IO_PU_PD(304), PORT_DATA_IO_PU_PD(305), PORT_DATA_O(306), PORT_DATA_O(307), PORT_DATA_I_PU(308), PORT_DATA_O(309), /* Table 25-1 (Function 0-7) */ PINMUX_DATA(VBUS_0_MARK, PORT0_FN1), PINMUX_DATA(GPI0_MARK, PORT1_FN1), PINMUX_DATA(GPI1_MARK, PORT2_FN1), PINMUX_DATA(GPI2_MARK, PORT3_FN1), PINMUX_DATA(GPI3_MARK, PORT4_FN1), PINMUX_DATA(GPI4_MARK, PORT5_FN1), PINMUX_DATA(GPI5_MARK, PORT6_FN1), PINMUX_DATA(GPI6_MARK, PORT7_FN1), PINMUX_DATA(GPI7_MARK, PORT8_FN1), PINMUX_DATA(SCIFA7_RXD_MARK, PORT12_FN2), PINMUX_DATA(SCIFA7_CTS__MARK, PORT13_FN2), PINMUX_DATA(GPO7_MARK, PORT14_FN1), \ PINMUX_DATA(MFG0_OUT2_MARK, PORT14_FN4), PINMUX_DATA(GPO6_MARK, PORT15_FN1), \ PINMUX_DATA(MFG1_OUT2_MARK, PORT15_FN4), PINMUX_DATA(GPO5_MARK, PORT16_FN1), \ PINMUX_DATA(SCIFA0_SCK_MARK, PORT16_FN2), \ PINMUX_DATA(FSICOSLDT3_MARK, PORT16_FN3), \ PINMUX_DATA(PORT16_VIO_CKOR_MARK, PORT16_FN4), PINMUX_DATA(SCIFA0_TXD_MARK, PORT17_FN2), PINMUX_DATA(SCIFA7_TXD_MARK, PORT18_FN2), PINMUX_DATA(SCIFA7_RTS__MARK, PORT19_FN2), \ PINMUX_DATA(PORT19_VIO_CKO2_MARK, PORT19_FN3), PINMUX_DATA(GPO0_MARK, 
PORT20_FN1), PINMUX_DATA(GPO1_MARK, PORT21_FN1), PINMUX_DATA(GPO2_MARK, PORT22_FN1), \ PINMUX_DATA(STATUS0_MARK, PORT22_FN2), PINMUX_DATA(GPO3_MARK, PORT23_FN1), \ PINMUX_DATA(STATUS1_MARK, PORT23_FN2), PINMUX_DATA(GPO4_MARK, PORT24_FN1), \ PINMUX_DATA(STATUS2_MARK, PORT24_FN2), PINMUX_DATA(VINT_MARK, PORT25_FN1), PINMUX_DATA(TCKON_MARK, PORT26_FN1), PINMUX_DATA(XDVFS1_MARK, PORT27_FN1), \ PINMUX_DATA(PORT27_I2C_SCL2_MARK, PORT27_FN2, MSEL2CR_MSEL17_0, MSEL2CR_MSEL16_1), \ PINMUX_DATA(PORT27_I2C_SCL3_MARK, PORT27_FN3, MSEL2CR_MSEL19_0, MSEL2CR_MSEL18_0), \ PINMUX_DATA(MFG0_OUT1_MARK, PORT27_FN4), \ PINMUX_DATA(PORT27_IROUT_MARK, PORT27_FN7), PINMUX_DATA(XDVFS2_MARK, PORT28_FN1), \ PINMUX_DATA(PORT28_I2C_SDA2_MARK, PORT28_FN2, MSEL2CR_MSEL17_0, MSEL2CR_MSEL16_1), \ PINMUX_DATA(PORT28_I2C_SDA3_MARK, PORT28_FN3, MSEL2CR_MSEL19_0, MSEL2CR_MSEL18_0), \ PINMUX_DATA(PORT28_TPU1TO1_MARK, PORT28_FN7), PINMUX_DATA(SIM_RST_MARK, PORT29_FN1), \ PINMUX_DATA(PORT29_TPU1TO1_MARK, PORT29_FN4), PINMUX_DATA(SIM_CLK_MARK, PORT30_FN1), \ PINMUX_DATA(PORT30_VIO_CKOR_MARK, PORT30_FN4), PINMUX_DATA(SIM_D_MARK, PORT31_FN1), \ PINMUX_DATA(PORT31_IROUT_MARK, PORT31_FN4), PINMUX_DATA(SCIFA4_TXD_MARK, PORT32_FN2), PINMUX_DATA(SCIFA4_RXD_MARK, PORT33_FN2), \ PINMUX_DATA(XWUP_MARK, PORT33_FN3), PINMUX_DATA(SCIFA4_RTS__MARK, PORT34_FN2), PINMUX_DATA(SCIFA4_CTS__MARK, PORT35_FN2), PINMUX_DATA(FSIBOBT_MARK, PORT36_FN1), \ PINMUX_DATA(FSIBIBT_MARK, PORT36_FN2), PINMUX_DATA(FSIBOLR_MARK, PORT37_FN1), \ PINMUX_DATA(FSIBILR_MARK, PORT37_FN2), PINMUX_DATA(FSIBOSLD_MARK, PORT38_FN1), PINMUX_DATA(FSIBISLD_MARK, PORT39_FN1), PINMUX_DATA(VACK_MARK, PORT40_FN1), PINMUX_DATA(XTAL1L_MARK, PORT41_FN1), PINMUX_DATA(SCIFA0_RTS__MARK, PORT42_FN2), \ PINMUX_DATA(FSICOSLDT2_MARK, PORT42_FN3), PINMUX_DATA(SCIFA0_RXD_MARK, PORT43_FN2), PINMUX_DATA(SCIFA0_CTS__MARK, PORT44_FN2), \ PINMUX_DATA(FSICOSLDT1_MARK, PORT44_FN3), PINMUX_DATA(FSICOBT_MARK, PORT45_FN1), \ PINMUX_DATA(FSICIBT_MARK, PORT45_FN2), \ 
PINMUX_DATA(FSIDOBT_MARK, PORT45_FN3), \ PINMUX_DATA(FSIDIBT_MARK, PORT45_FN4), PINMUX_DATA(FSICOLR_MARK, PORT46_FN1), \ PINMUX_DATA(FSICILR_MARK, PORT46_FN2), \ PINMUX_DATA(FSIDOLR_MARK, PORT46_FN3), \ PINMUX_DATA(FSIDILR_MARK, PORT46_FN4), PINMUX_DATA(FSICOSLD_MARK, PORT47_FN1), \ PINMUX_DATA(PORT47_FSICSPDIF_MARK, PORT47_FN2), PINMUX_DATA(FSICISLD_MARK, PORT48_FN1), \ PINMUX_DATA(FSIDISLD_MARK, PORT48_FN3), PINMUX_DATA(FSIACK_MARK, PORT49_FN1), \ PINMUX_DATA(PORT49_IRDA_OUT_MARK, PORT49_FN2, MSEL4CR_MSEL19_1), \ PINMUX_DATA(PORT49_IROUT_MARK, PORT49_FN4), \ PINMUX_DATA(FSIAOMC_MARK, PORT49_FN5), PINMUX_DATA(FSIAOLR_MARK, PORT50_FN1), \ PINMUX_DATA(BBIF2_TSYNC2_MARK, PORT50_FN2), \ PINMUX_DATA(TPU2TO2_MARK, PORT50_FN3), \ PINMUX_DATA(FSIAILR_MARK, PORT50_FN5), PINMUX_DATA(FSIAOBT_MARK, PORT51_FN1), \ PINMUX_DATA(BBIF2_TSCK2_MARK, PORT51_FN2), \ PINMUX_DATA(TPU2TO3_MARK, PORT51_FN3), \ PINMUX_DATA(FSIAIBT_MARK, PORT51_FN5), PINMUX_DATA(FSIAOSLD_MARK, PORT52_FN1), \ PINMUX_DATA(BBIF2_TXD2_MARK, PORT52_FN2), PINMUX_DATA(FSIASPDIF_MARK, PORT53_FN1), \ PINMUX_DATA(PORT53_IRDA_IN_MARK, PORT53_FN2, MSEL4CR_MSEL19_1), \ PINMUX_DATA(TPU3TO3_MARK, PORT53_FN3), \ PINMUX_DATA(FSIBSPDIF_MARK, PORT53_FN5), \ PINMUX_DATA(PORT53_FSICSPDIF_MARK, PORT53_FN6), PINMUX_DATA(FSIBCK_MARK, PORT54_FN1), \ PINMUX_DATA(PORT54_IRDA_FIRSEL_MARK, PORT54_FN2, MSEL4CR_MSEL19_1), \ PINMUX_DATA(TPU3TO2_MARK, PORT54_FN3), \ PINMUX_DATA(FSIBOMC_MARK, PORT54_FN5), \ PINMUX_DATA(FSICCK_MARK, PORT54_FN6), \ PINMUX_DATA(FSICOMC_MARK, PORT54_FN7), PINMUX_DATA(FSIAISLD_MARK, PORT55_FN1), \ PINMUX_DATA(TPU0TO0_MARK, PORT55_FN3), PINMUX_DATA(A0_MARK, PORT57_FN1), \ PINMUX_DATA(BS__MARK, PORT57_FN2), PINMUX_DATA(A12_MARK, PORT58_FN1), \ PINMUX_DATA(PORT58_KEYOUT7_MARK, PORT58_FN2), \ PINMUX_DATA(TPU4TO2_MARK, PORT58_FN4), PINMUX_DATA(A13_MARK, PORT59_FN1), \ PINMUX_DATA(PORT59_KEYOUT6_MARK, PORT59_FN2), \ PINMUX_DATA(TPU0TO1_MARK, PORT59_FN4), PINMUX_DATA(A14_MARK, PORT60_FN1), \ PINMUX_DATA(KEYOUT5_MARK, 
PORT60_FN2), PINMUX_DATA(A15_MARK, PORT61_FN1), \ PINMUX_DATA(KEYOUT4_MARK, PORT61_FN2), PINMUX_DATA(A16_MARK, PORT62_FN1), \ PINMUX_DATA(KEYOUT3_MARK, PORT62_FN2), \ PINMUX_DATA(MSIOF0_SS1_MARK, PORT62_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A17_MARK, PORT63_FN1), \ PINMUX_DATA(KEYOUT2_MARK, PORT63_FN2), \ PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT63_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A18_MARK, PORT64_FN1), \ PINMUX_DATA(KEYOUT1_MARK, PORT64_FN2), \ PINMUX_DATA(MSIOF0_TSCK_MARK, PORT64_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A19_MARK, PORT65_FN1), \ PINMUX_DATA(KEYOUT0_MARK, PORT65_FN2), \ PINMUX_DATA(MSIOF0_TXD_MARK, PORT65_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A20_MARK, PORT66_FN1), \ PINMUX_DATA(KEYIN0_MARK, PORT66_FN2), \ PINMUX_DATA(MSIOF0_RSCK_MARK, PORT66_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A21_MARK, PORT67_FN1), \ PINMUX_DATA(KEYIN1_MARK, PORT67_FN2), \ PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT67_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A22_MARK, PORT68_FN1), \ PINMUX_DATA(KEYIN2_MARK, PORT68_FN2), \ PINMUX_DATA(MSIOF0_MCK0_MARK, PORT68_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A23_MARK, PORT69_FN1), \ PINMUX_DATA(KEYIN3_MARK, PORT69_FN2), \ PINMUX_DATA(MSIOF0_MCK1_MARK, PORT69_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A24_MARK, PORT70_FN1), \ PINMUX_DATA(KEYIN4_MARK, PORT70_FN2), \ PINMUX_DATA(MSIOF0_RXD_MARK, PORT70_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A25_MARK, PORT71_FN1), \ PINMUX_DATA(KEYIN5_MARK, PORT71_FN2), \ PINMUX_DATA(MSIOF0_SS2_MARK, PORT71_FN4, MSEL3CR_MSEL11_0), PINMUX_DATA(A26_MARK, PORT72_FN1), \ PINMUX_DATA(KEYIN6_MARK, PORT72_FN2), PINMUX_DATA(KEYIN7_MARK, PORT73_FN2), PINMUX_DATA(D0_NAF0_MARK, PORT74_FN1), PINMUX_DATA(D1_NAF1_MARK, PORT75_FN1), PINMUX_DATA(D2_NAF2_MARK, PORT76_FN1), PINMUX_DATA(D3_NAF3_MARK, PORT77_FN1), PINMUX_DATA(D4_NAF4_MARK, PORT78_FN1), PINMUX_DATA(D5_NAF5_MARK, PORT79_FN1), PINMUX_DATA(D6_NAF6_MARK, PORT80_FN1), PINMUX_DATA(D7_NAF7_MARK, PORT81_FN1), PINMUX_DATA(D8_NAF8_MARK, PORT82_FN1), PINMUX_DATA(D9_NAF9_MARK, PORT83_FN1), 
PINMUX_DATA(D10_NAF10_MARK, PORT84_FN1), PINMUX_DATA(D11_NAF11_MARK, PORT85_FN1), PINMUX_DATA(D12_NAF12_MARK, PORT86_FN1), PINMUX_DATA(D13_NAF13_MARK, PORT87_FN1), PINMUX_DATA(D14_NAF14_MARK, PORT88_FN1), PINMUX_DATA(D15_NAF15_MARK, PORT89_FN1), PINMUX_DATA(CS4__MARK, PORT90_FN1), PINMUX_DATA(CS5A__MARK, PORT91_FN1), \ PINMUX_DATA(PORT91_RDWR_MARK, PORT91_FN2), PINMUX_DATA(CS5B__MARK, PORT92_FN1), \ PINMUX_DATA(FCE1__MARK, PORT92_FN2), PINMUX_DATA(CS6B__MARK, PORT93_FN1), \ PINMUX_DATA(DACK0_MARK, PORT93_FN4), PINMUX_DATA(FCE0__MARK, PORT94_FN1), \ PINMUX_DATA(CS6A__MARK, PORT94_FN2), PINMUX_DATA(WAIT__MARK, PORT95_FN1), \ PINMUX_DATA(DREQ0_MARK, PORT95_FN2), PINMUX_DATA(RD__FSC_MARK, PORT96_FN1), PINMUX_DATA(WE0__FWE_MARK, PORT97_FN1), \ PINMUX_DATA(RDWR_FWE_MARK, PORT97_FN2), PINMUX_DATA(WE1__MARK, PORT98_FN1), PINMUX_DATA(FRB_MARK, PORT99_FN1), PINMUX_DATA(CKO_MARK, PORT100_FN1), PINMUX_DATA(NBRSTOUT__MARK, PORT101_FN1), PINMUX_DATA(NBRST__MARK, PORT102_FN1), PINMUX_DATA(BBIF2_TXD_MARK, PORT103_FN3), PINMUX_DATA(BBIF2_RXD_MARK, PORT104_FN3), PINMUX_DATA(BBIF2_SYNC_MARK, PORT105_FN3), PINMUX_DATA(BBIF2_SCK_MARK, PORT106_FN3), PINMUX_DATA(SCIFA3_CTS__MARK, PORT107_FN3), \ PINMUX_DATA(MFG3_IN2_MARK, PORT107_FN4), PINMUX_DATA(SCIFA3_RXD_MARK, PORT108_FN3), \ PINMUX_DATA(MFG3_IN1_MARK, PORT108_FN4), PINMUX_DATA(BBIF1_SS2_MARK, PORT109_FN2), \ PINMUX_DATA(SCIFA3_RTS__MARK, PORT109_FN3), \ PINMUX_DATA(MFG3_OUT1_MARK, PORT109_FN4), PINMUX_DATA(SCIFA3_TXD_MARK, PORT110_FN3), PINMUX_DATA(HSI_RX_DATA_MARK, PORT111_FN1), \ PINMUX_DATA(BBIF1_RXD_MARK, PORT111_FN3), PINMUX_DATA(HSI_TX_WAKE_MARK, PORT112_FN1), \ PINMUX_DATA(BBIF1_TSCK_MARK, PORT112_FN3), PINMUX_DATA(HSI_TX_DATA_MARK, PORT113_FN1), \ PINMUX_DATA(BBIF1_TSYNC_MARK, PORT113_FN3), PINMUX_DATA(HSI_TX_READY_MARK, PORT114_FN1), \ PINMUX_DATA(BBIF1_TXD_MARK, PORT114_FN3), PINMUX_DATA(HSI_RX_READY_MARK, PORT115_FN1), \ PINMUX_DATA(BBIF1_RSCK_MARK, PORT115_FN3), \ PINMUX_DATA(PORT115_I2C_SCL2_MARK, PORT115_FN5, 
MSEL2CR_MSEL17_1), \ PINMUX_DATA(PORT115_I2C_SCL3_MARK, PORT115_FN6, MSEL2CR_MSEL19_1), PINMUX_DATA(HSI_RX_WAKE_MARK, PORT116_FN1), \ PINMUX_DATA(BBIF1_RSYNC_MARK, PORT116_FN3), \ PINMUX_DATA(PORT116_I2C_SDA2_MARK, PORT116_FN5, MSEL2CR_MSEL17_1), \ PINMUX_DATA(PORT116_I2C_SDA3_MARK, PORT116_FN6, MSEL2CR_MSEL19_1), PINMUX_DATA(HSI_RX_FLAG_MARK, PORT117_FN1), \ PINMUX_DATA(BBIF1_SS1_MARK, PORT117_FN2), \ PINMUX_DATA(BBIF1_FLOW_MARK, PORT117_FN3), PINMUX_DATA(HSI_TX_FLAG_MARK, PORT118_FN1), PINMUX_DATA(VIO_VD_MARK, PORT128_FN1), \ PINMUX_DATA(PORT128_LCD2VSYN_MARK, PORT128_FN4, MSEL3CR_MSEL2_0), \ PINMUX_DATA(VIO2_VD_MARK, PORT128_FN6, MSEL4CR_MSEL27_0), \ PINMUX_DATA(LCD2D0_MARK, PORT128_FN7), PINMUX_DATA(VIO_HD_MARK, PORT129_FN1), \ PINMUX_DATA(PORT129_LCD2HSYN_MARK, PORT129_FN4), \ PINMUX_DATA(PORT129_LCD2CS__MARK, PORT129_FN5), \ PINMUX_DATA(VIO2_HD_MARK, PORT129_FN6, MSEL4CR_MSEL27_0), \ PINMUX_DATA(LCD2D1_MARK, PORT129_FN7), PINMUX_DATA(VIO_D0_MARK, PORT130_FN1), \ PINMUX_DATA(PORT130_MSIOF2_RXD_MARK, PORT130_FN3, MSEL4CR_MSEL11_0, MSEL4CR_MSEL10_1), \ PINMUX_DATA(LCD2D10_MARK, PORT130_FN7), PINMUX_DATA(VIO_D1_MARK, PORT131_FN1), \ PINMUX_DATA(PORT131_KEYOUT6_MARK, PORT131_FN2), \ PINMUX_DATA(PORT131_MSIOF2_SS1_MARK, PORT131_FN3), \ PINMUX_DATA(PORT131_KEYOUT11_MARK, PORT131_FN4), \ PINMUX_DATA(LCD2D11_MARK, PORT131_FN7), PINMUX_DATA(VIO_D2_MARK, PORT132_FN1), \ PINMUX_DATA(PORT132_KEYOUT7_MARK, PORT132_FN2), \ PINMUX_DATA(PORT132_MSIOF2_SS2_MARK, PORT132_FN3), \ PINMUX_DATA(PORT132_KEYOUT10_MARK, PORT132_FN4), \ PINMUX_DATA(LCD2D12_MARK, PORT132_FN7), PINMUX_DATA(VIO_D3_MARK, PORT133_FN1), \ PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT133_FN3, MSEL4CR_MSEL11_0), \ PINMUX_DATA(LCD2D13_MARK, PORT133_FN7), PINMUX_DATA(VIO_D4_MARK, PORT134_FN1), \ PINMUX_DATA(MSIOF2_TXD_MARK, PORT134_FN3, MSEL4CR_MSEL11_0), \ PINMUX_DATA(LCD2D14_MARK, PORT134_FN7), PINMUX_DATA(VIO_D5_MARK, PORT135_FN1), \ PINMUX_DATA(MSIOF2_TSCK_MARK, PORT135_FN3, MSEL4CR_MSEL11_0), \ 
PINMUX_DATA(LCD2D15_MARK, PORT135_FN7), PINMUX_DATA(VIO_D6_MARK, PORT136_FN1), \ PINMUX_DATA(PORT136_KEYOUT8_MARK, PORT136_FN2), \ PINMUX_DATA(LCD2D16_MARK, PORT136_FN7), PINMUX_DATA(VIO_D7_MARK, PORT137_FN1), \ PINMUX_DATA(PORT137_KEYOUT9_MARK, PORT137_FN2), \ PINMUX_DATA(LCD2D17_MARK, PORT137_FN7), PINMUX_DATA(VIO_D8_MARK, PORT138_FN1), \ PINMUX_DATA(PORT138_KEYOUT8_MARK, PORT138_FN2), \ PINMUX_DATA(VIO2_D0_MARK, PORT138_FN6), \ PINMUX_DATA(LCD2D6_MARK, PORT138_FN7), PINMUX_DATA(VIO_D9_MARK, PORT139_FN1), \ PINMUX_DATA(PORT139_KEYOUT9_MARK, PORT139_FN2), \ PINMUX_DATA(VIO2_D1_MARK, PORT139_FN6), \ PINMUX_DATA(LCD2D7_MARK, PORT139_FN7), PINMUX_DATA(VIO_D10_MARK, PORT140_FN1), \ PINMUX_DATA(TPU0TO2_MARK, PORT140_FN4), \ PINMUX_DATA(VIO2_D2_MARK, PORT140_FN6), \ PINMUX_DATA(LCD2D8_MARK, PORT140_FN7), PINMUX_DATA(VIO_D11_MARK, PORT141_FN1), \ PINMUX_DATA(TPU0TO3_MARK, PORT141_FN4), \ PINMUX_DATA(VIO2_D3_MARK, PORT141_FN6), \ PINMUX_DATA(LCD2D9_MARK, PORT141_FN7), PINMUX_DATA(VIO_D12_MARK, PORT142_FN1), \ PINMUX_DATA(PORT142_KEYOUT10_MARK, PORT142_FN2), \ PINMUX_DATA(VIO2_D4_MARK, PORT142_FN6), \ PINMUX_DATA(LCD2D2_MARK, PORT142_FN7), PINMUX_DATA(VIO_D13_MARK, PORT143_FN1), \ PINMUX_DATA(PORT143_KEYOUT11_MARK, PORT143_FN2), \ PINMUX_DATA(PORT143_KEYOUT6_MARK, PORT143_FN3), \ PINMUX_DATA(VIO2_D5_MARK, PORT143_FN6), \ PINMUX_DATA(LCD2D3_MARK, PORT143_FN7), PINMUX_DATA(VIO_D14_MARK, PORT144_FN1), \ PINMUX_DATA(PORT144_KEYOUT7_MARK, PORT144_FN2), \ PINMUX_DATA(VIO2_D6_MARK, PORT144_FN6), \ PINMUX_DATA(LCD2D4_MARK, PORT144_FN7), PINMUX_DATA(VIO_D15_MARK, PORT145_FN1), \ PINMUX_DATA(TPU1TO3_MARK, PORT145_FN3), \ PINMUX_DATA(PORT145_LCD2DISP_MARK, PORT145_FN4), \ PINMUX_DATA(PORT145_LCD2RS_MARK, PORT145_FN5), \ PINMUX_DATA(VIO2_D7_MARK, PORT145_FN6), \ PINMUX_DATA(LCD2D5_MARK, PORT145_FN7), PINMUX_DATA(VIO_CLK_MARK, PORT146_FN1), \ PINMUX_DATA(LCD2DCK_MARK, PORT146_FN4), \ PINMUX_DATA(PORT146_LCD2WR__MARK, PORT146_FN5), \ PINMUX_DATA(VIO2_CLK_MARK, PORT146_FN6, 
MSEL4CR_MSEL27_0), \ PINMUX_DATA(LCD2D18_MARK, PORT146_FN7), PINMUX_DATA(VIO_FIELD_MARK, PORT147_FN1), \ PINMUX_DATA(LCD2RD__MARK, PORT147_FN4), \ PINMUX_DATA(VIO2_FIELD_MARK, PORT147_FN6, MSEL4CR_MSEL27_0), \ PINMUX_DATA(LCD2D19_MARK, PORT147_FN7), PINMUX_DATA(VIO_CKO_MARK, PORT148_FN1), PINMUX_DATA(A27_MARK, PORT149_FN1), \ PINMUX_DATA(PORT149_RDWR_MARK, PORT149_FN2), \ PINMUX_DATA(MFG0_IN1_MARK, PORT149_FN3), \ PINMUX_DATA(PORT149_KEYOUT9_MARK, PORT149_FN4), PINMUX_DATA(MFG0_IN2_MARK, PORT150_FN3), PINMUX_DATA(TS_SPSYNC3_MARK, PORT151_FN4), \ PINMUX_DATA(MSIOF2_RSCK_MARK, PORT151_FN5), PINMUX_DATA(TS_SDAT3_MARK, PORT152_FN4), \ PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT152_FN5), PINMUX_DATA(TPU1TO2_MARK, PORT153_FN3), \ PINMUX_DATA(TS_SDEN3_MARK, PORT153_FN4), \ PINMUX_DATA(PORT153_MSIOF2_SS1_MARK, PORT153_FN5), PINMUX_DATA(SCIFA2_TXD1_MARK, PORT154_FN2, MSEL3CR_MSEL9_0), \ PINMUX_DATA(MSIOF2_MCK0_MARK, PORT154_FN5), PINMUX_DATA(SCIFA2_RXD1_MARK, PORT155_FN2, MSEL3CR_MSEL9_0), \ PINMUX_DATA(MSIOF2_MCK1_MARK, PORT155_FN5), PINMUX_DATA(SCIFA2_RTS1__MARK, PORT156_FN2, MSEL3CR_MSEL9_0), \ PINMUX_DATA(PORT156_MSIOF2_SS2_MARK, PORT156_FN5), PINMUX_DATA(SCIFA2_CTS1__MARK, PORT157_FN2, MSEL3CR_MSEL9_0), \ PINMUX_DATA(PORT157_MSIOF2_RXD_MARK, PORT157_FN5, MSEL4CR_MSEL11_0, MSEL4CR_MSEL10_0), PINMUX_DATA(DINT__MARK, PORT158_FN1), \ PINMUX_DATA(SCIFA2_SCK1_MARK, PORT158_FN2, MSEL3CR_MSEL9_0), \ PINMUX_DATA(TS_SCK3_MARK, PORT158_FN4), PINMUX_DATA(PORT159_SCIFB_SCK_MARK, PORT159_FN1, MSEL4CR_MSEL22_0), \ PINMUX_DATA(PORT159_SCIFA5_SCK_MARK, PORT159_FN2, MSEL4CR_MSEL21_1), \ PINMUX_DATA(NMI_MARK, PORT159_FN3), PINMUX_DATA(PORT160_SCIFB_TXD_MARK, PORT160_FN1, MSEL4CR_MSEL22_0), \ PINMUX_DATA(PORT160_SCIFA5_TXD_MARK, PORT160_FN2, MSEL4CR_MSEL21_1), PINMUX_DATA(PORT161_SCIFB_CTS__MARK, PORT161_FN1, MSEL4CR_MSEL22_0), \ PINMUX_DATA(PORT161_SCIFA5_CTS__MARK, PORT161_FN2, MSEL4CR_MSEL21_1), PINMUX_DATA(PORT162_SCIFB_RXD_MARK, PORT162_FN1, MSEL4CR_MSEL22_0), \ 
PINMUX_DATA(PORT162_SCIFA5_RXD_MARK, PORT162_FN2, MSEL4CR_MSEL21_1), PINMUX_DATA(PORT163_SCIFB_RTS__MARK, PORT163_FN1, MSEL4CR_MSEL22_0), \ PINMUX_DATA(PORT163_SCIFA5_RTS__MARK, PORT163_FN2, MSEL4CR_MSEL21_1), \ PINMUX_DATA(TPU3TO0_MARK, PORT163_FN5), PINMUX_DATA(LCDD0_MARK, PORT192_FN1), PINMUX_DATA(LCDD1_MARK, PORT193_FN1), \ PINMUX_DATA(PORT193_SCIFA5_CTS__MARK, PORT193_FN3, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_1), \ PINMUX_DATA(BBIF2_TSYNC1_MARK, PORT193_FN5), PINMUX_DATA(LCDD2_MARK, PORT194_FN1), \ PINMUX_DATA(PORT194_SCIFA5_RTS__MARK, PORT194_FN3, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_1), \ PINMUX_DATA(BBIF2_TSCK1_MARK, PORT194_FN5), PINMUX_DATA(LCDD3_MARK, PORT195_FN1), \ PINMUX_DATA(PORT195_SCIFA5_RXD_MARK, PORT195_FN3, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_1), \ PINMUX_DATA(BBIF2_TXD1_MARK, PORT195_FN5), PINMUX_DATA(LCDD4_MARK, PORT196_FN1), \ PINMUX_DATA(PORT196_SCIFA5_TXD_MARK, PORT196_FN3, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_1), PINMUX_DATA(LCDD5_MARK, PORT197_FN1), \ PINMUX_DATA(PORT197_SCIFA5_SCK_MARK, PORT197_FN3, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_1), \ PINMUX_DATA(MFG2_OUT2_MARK, PORT197_FN5), \ PINMUX_DATA(TPU2TO1_MARK, PORT197_FN7), PINMUX_DATA(LCDD6_MARK, PORT198_FN1), PINMUX_DATA(LCDD7_MARK, PORT199_FN1), \ PINMUX_DATA(TPU4TO1_MARK, PORT199_FN2), \ PINMUX_DATA(MFG4_OUT2_MARK, PORT199_FN5), PINMUX_DATA(LCDD8_MARK, PORT200_FN1), \ PINMUX_DATA(D16_MARK, PORT200_FN6), PINMUX_DATA(LCDD9_MARK, PORT201_FN1), \ PINMUX_DATA(D17_MARK, PORT201_FN6), PINMUX_DATA(LCDD10_MARK, PORT202_FN1), \ PINMUX_DATA(D18_MARK, PORT202_FN6), PINMUX_DATA(LCDD11_MARK, PORT203_FN1), \ PINMUX_DATA(D19_MARK, PORT203_FN6), PINMUX_DATA(LCDD12_MARK, PORT204_FN1), \ PINMUX_DATA(D20_MARK, PORT204_FN6), PINMUX_DATA(LCDD13_MARK, PORT205_FN1), \ PINMUX_DATA(D21_MARK, PORT205_FN6), PINMUX_DATA(LCDD14_MARK, PORT206_FN1), \ PINMUX_DATA(D22_MARK, PORT206_FN6), PINMUX_DATA(LCDD15_MARK, PORT207_FN1), \ PINMUX_DATA(PORT207_MSIOF0L_SS1_MARK, PORT207_FN2, MSEL3CR_MSEL11_1), \ PINMUX_DATA(D23_MARK, PORT207_FN6), 
PINMUX_DATA(LCDD16_MARK, PORT208_FN1), \ PINMUX_DATA(PORT208_MSIOF0L_SS2_MARK, PORT208_FN2, MSEL3CR_MSEL11_1), \ PINMUX_DATA(D24_MARK, PORT208_FN6), PINMUX_DATA(LCDD17_MARK, PORT209_FN1), \ PINMUX_DATA(D25_MARK, PORT209_FN6), PINMUX_DATA(LCDD18_MARK, PORT210_FN1), \ PINMUX_DATA(DREQ2_MARK, PORT210_FN2), \ PINMUX_DATA(PORT210_MSIOF0L_SS1_MARK, PORT210_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(D26_MARK, PORT210_FN6), PINMUX_DATA(LCDD19_MARK, PORT211_FN1), \ PINMUX_DATA(PORT211_MSIOF0L_SS2_MARK, PORT211_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(D27_MARK, PORT211_FN6), PINMUX_DATA(LCDD20_MARK, PORT212_FN1), \ PINMUX_DATA(TS_SPSYNC1_MARK, PORT212_FN2), \ PINMUX_DATA(MSIOF0L_MCK0_MARK, PORT212_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(D28_MARK, PORT212_FN6), PINMUX_DATA(LCDD21_MARK, PORT213_FN1), \ PINMUX_DATA(TS_SDAT1_MARK, PORT213_FN2), \ PINMUX_DATA(MSIOF0L_MCK1_MARK, PORT213_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(D29_MARK, PORT213_FN6), PINMUX_DATA(LCDD22_MARK, PORT214_FN1), \ PINMUX_DATA(TS_SDEN1_MARK, PORT214_FN2), \ PINMUX_DATA(MSIOF0L_RSCK_MARK, PORT214_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(D30_MARK, PORT214_FN6), PINMUX_DATA(LCDD23_MARK, PORT215_FN1), \ PINMUX_DATA(TS_SCK1_MARK, PORT215_FN2), \ PINMUX_DATA(MSIOF0L_RSYNC_MARK, PORT215_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(D31_MARK, PORT215_FN6), PINMUX_DATA(LCDDCK_MARK, PORT216_FN1), \ PINMUX_DATA(LCDWR__MARK, PORT216_FN2), PINMUX_DATA(LCDRD__MARK, PORT217_FN1), \ PINMUX_DATA(DACK2_MARK, PORT217_FN2), \ PINMUX_DATA(PORT217_LCD2RS_MARK, PORT217_FN3), \ PINMUX_DATA(MSIOF0L_TSYNC_MARK, PORT217_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(VIO2_FIELD3_MARK, PORT217_FN6, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_1), \ PINMUX_DATA(PORT217_LCD2DISP_MARK, PORT217_FN7), PINMUX_DATA(LCDHSYN_MARK, PORT218_FN1), \ PINMUX_DATA(LCDCS__MARK, PORT218_FN2), \ PINMUX_DATA(LCDCS2__MARK, PORT218_FN3), \ PINMUX_DATA(DACK3_MARK, PORT218_FN4), \ PINMUX_DATA(PORT218_VIO_CKOR_MARK, PORT218_FN5), PINMUX_DATA(LCDDISP_MARK, PORT219_FN1), \ PINMUX_DATA(LCDRS_MARK, 
PORT219_FN2), \ PINMUX_DATA(PORT219_LCD2WR__MARK, PORT219_FN3), \ PINMUX_DATA(DREQ3_MARK, PORT219_FN4), \ PINMUX_DATA(MSIOF0L_TSCK_MARK, PORT219_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(VIO2_CLK3_MARK, PORT219_FN6, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_1), \ PINMUX_DATA(LCD2DCK_2_MARK, PORT219_FN7), PINMUX_DATA(LCDVSYN_MARK, PORT220_FN1), \ PINMUX_DATA(LCDVSYN2_MARK, PORT220_FN2), PINMUX_DATA(LCDLCLK_MARK, PORT221_FN1), \ PINMUX_DATA(DREQ1_MARK, PORT221_FN2), \ PINMUX_DATA(PORT221_LCD2CS__MARK, PORT221_FN3), \ PINMUX_DATA(PWEN_MARK, PORT221_FN4), \ PINMUX_DATA(MSIOF0L_RXD_MARK, PORT221_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(VIO2_HD3_MARK, PORT221_FN6, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_1), \ PINMUX_DATA(PORT221_LCD2HSYN_MARK, PORT221_FN7), PINMUX_DATA(LCDDON_MARK, PORT222_FN1), \ PINMUX_DATA(LCDDON2_MARK, PORT222_FN2), \ PINMUX_DATA(DACK1_MARK, PORT222_FN3), \ PINMUX_DATA(OVCN_MARK, PORT222_FN4), \ PINMUX_DATA(MSIOF0L_TXD_MARK, PORT222_FN5, MSEL3CR_MSEL11_1), \ PINMUX_DATA(VIO2_VD3_MARK, PORT222_FN6, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_1), \ PINMUX_DATA(PORT222_LCD2VSYN_MARK, PORT222_FN7, MSEL3CR_MSEL2_1), PINMUX_DATA(SCIFA1_TXD_MARK, PORT225_FN2), \ PINMUX_DATA(OVCN2_MARK, PORT225_FN4), PINMUX_DATA(EXTLP_MARK, PORT226_FN1), \ PINMUX_DATA(SCIFA1_SCK_MARK, PORT226_FN2), \ PINMUX_DATA(PORT226_VIO_CKO2_MARK, PORT226_FN5), PINMUX_DATA(SCIFA1_RTS__MARK, PORT227_FN2), \ PINMUX_DATA(IDIN_MARK, PORT227_FN4), PINMUX_DATA(SCIFA1_RXD_MARK, PORT228_FN2), PINMUX_DATA(SCIFA1_CTS__MARK, PORT229_FN2), \ PINMUX_DATA(MFG1_IN1_MARK, PORT229_FN3), PINMUX_DATA(MSIOF1_TXD_MARK, PORT230_FN1), \ PINMUX_DATA(SCIFA2_TXD2_MARK, PORT230_FN2, MSEL3CR_MSEL9_1), PINMUX_DATA(MSIOF1_TSYNC_MARK, PORT231_FN1), \ PINMUX_DATA(SCIFA2_CTS2__MARK, PORT231_FN2, MSEL3CR_MSEL9_1), PINMUX_DATA(MSIOF1_TSCK_MARK, PORT232_FN1), \ PINMUX_DATA(SCIFA2_SCK2_MARK, PORT232_FN2, MSEL3CR_MSEL9_1), PINMUX_DATA(MSIOF1_RXD_MARK, PORT233_FN1), \ PINMUX_DATA(SCIFA2_RXD2_MARK, PORT233_FN2, MSEL3CR_MSEL9_1), PINMUX_DATA(MSIOF1_RSCK_MARK, 
PORT234_FN1), \ PINMUX_DATA(SCIFA2_RTS2__MARK, PORT234_FN2, MSEL3CR_MSEL9_1), \ PINMUX_DATA(VIO2_CLK2_MARK, PORT234_FN6, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_0), \ PINMUX_DATA(LCD2D20_MARK, PORT234_FN7), PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT235_FN1), \ PINMUX_DATA(MFG1_IN2_MARK, PORT235_FN3), \ PINMUX_DATA(VIO2_VD2_MARK, PORT235_FN6, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_0), \ PINMUX_DATA(LCD2D21_MARK, PORT235_FN7), PINMUX_DATA(MSIOF1_MCK0_MARK, PORT236_FN1), \ PINMUX_DATA(PORT236_I2C_SDA2_MARK, PORT236_FN2, MSEL2CR_MSEL17_0, MSEL2CR_MSEL16_0), PINMUX_DATA(MSIOF1_MCK1_MARK, PORT237_FN1), \ PINMUX_DATA(PORT237_I2C_SCL2_MARK, PORT237_FN2, MSEL2CR_MSEL17_0, MSEL2CR_MSEL16_0), PINMUX_DATA(MSIOF1_SS1_MARK, PORT238_FN1), \ PINMUX_DATA(VIO2_FIELD2_MARK, PORT238_FN6, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_0), \ PINMUX_DATA(LCD2D22_MARK, PORT238_FN7), PINMUX_DATA(MSIOF1_SS2_MARK, PORT239_FN1), \ PINMUX_DATA(VIO2_HD2_MARK, PORT239_FN6, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_0), \ PINMUX_DATA(LCD2D23_MARK, PORT239_FN7), PINMUX_DATA(SCIFA6_TXD_MARK, PORT240_FN1), PINMUX_DATA(PORT241_IRDA_OUT_MARK, PORT241_FN1, MSEL4CR_MSEL19_0), \ PINMUX_DATA(PORT241_IROUT_MARK, PORT241_FN2), \ PINMUX_DATA(MFG4_OUT1_MARK, PORT241_FN3), \ PINMUX_DATA(TPU4TO0_MARK, PORT241_FN4), PINMUX_DATA(PORT242_IRDA_IN_MARK, PORT242_FN1, MSEL4CR_MSEL19_0), \ PINMUX_DATA(MFG4_IN2_MARK, PORT242_FN3), PINMUX_DATA(PORT243_IRDA_FIRSEL_MARK, PORT243_FN1, MSEL4CR_MSEL19_0), \ PINMUX_DATA(PORT243_VIO_CKO2_MARK, PORT243_FN2), PINMUX_DATA(PORT244_SCIFA5_CTS__MARK, PORT244_FN1, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_0), \ PINMUX_DATA(MFG2_IN1_MARK, PORT244_FN2), \ PINMUX_DATA(PORT244_SCIFB_CTS__MARK, PORT244_FN3, MSEL4CR_MSEL22_1), \ PINMUX_DATA(MSIOF2R_RXD_MARK, PORT244_FN7, MSEL4CR_MSEL11_1), PINMUX_DATA(PORT245_SCIFA5_RTS__MARK, PORT245_FN1, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_0), \ PINMUX_DATA(MFG2_IN2_MARK, PORT245_FN2), \ PINMUX_DATA(PORT245_SCIFB_RTS__MARK, PORT245_FN3, MSEL4CR_MSEL22_1), \ PINMUX_DATA(MSIOF2R_TXD_MARK, PORT245_FN7, 
MSEL4CR_MSEL11_1), PINMUX_DATA(PORT246_SCIFA5_RXD_MARK, PORT246_FN1, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_0), \ PINMUX_DATA(MFG1_OUT1_MARK, PORT246_FN2), \ PINMUX_DATA(PORT246_SCIFB_RXD_MARK, PORT246_FN3, MSEL4CR_MSEL22_1), \ PINMUX_DATA(TPU1TO0_MARK, PORT246_FN4), PINMUX_DATA(PORT247_SCIFA5_TXD_MARK, PORT247_FN1, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_0), \ PINMUX_DATA(MFG3_OUT2_MARK, PORT247_FN2), \ PINMUX_DATA(PORT247_SCIFB_TXD_MARK, PORT247_FN3, MSEL4CR_MSEL22_1), \ PINMUX_DATA(TPU3TO1_MARK, PORT247_FN4), PINMUX_DATA(PORT248_SCIFA5_SCK_MARK, PORT248_FN1, MSEL4CR_MSEL21_0, MSEL4CR_MSEL20_0), \ PINMUX_DATA(MFG2_OUT1_MARK, PORT248_FN2), \ PINMUX_DATA(PORT248_SCIFB_SCK_MARK, PORT248_FN3, MSEL4CR_MSEL22_1), \ PINMUX_DATA(TPU2TO0_MARK, PORT248_FN4), \ PINMUX_DATA(PORT248_I2C_SCL3_MARK, PORT248_FN5, MSEL2CR_MSEL19_0, MSEL2CR_MSEL18_0), \ PINMUX_DATA(MSIOF2R_TSCK_MARK, PORT248_FN7, MSEL4CR_MSEL11_1), PINMUX_DATA(PORT249_IROUT_MARK, PORT249_FN1), \ PINMUX_DATA(MFG4_IN1_MARK, PORT249_FN2), \ PINMUX_DATA(PORT249_I2C_SDA3_MARK, PORT249_FN5, MSEL2CR_MSEL19_0, MSEL2CR_MSEL18_0), \ PINMUX_DATA(MSIOF2R_TSYNC_MARK, PORT249_FN7, MSEL4CR_MSEL11_1), PINMUX_DATA(SDHICLK0_MARK, PORT250_FN1), PINMUX_DATA(SDHICD0_MARK, PORT251_FN1), PINMUX_DATA(SDHID0_0_MARK, PORT252_FN1), PINMUX_DATA(SDHID0_1_MARK, PORT253_FN1), PINMUX_DATA(SDHID0_2_MARK, PORT254_FN1), PINMUX_DATA(SDHID0_3_MARK, PORT255_FN1), PINMUX_DATA(SDHICMD0_MARK, PORT256_FN1), PINMUX_DATA(SDHIWP0_MARK, PORT257_FN1), PINMUX_DATA(SDHICLK1_MARK, PORT258_FN1), PINMUX_DATA(SDHID1_0_MARK, PORT259_FN1), \ PINMUX_DATA(TS_SPSYNC2_MARK, PORT259_FN3), PINMUX_DATA(SDHID1_1_MARK, PORT260_FN1), \ PINMUX_DATA(TS_SDAT2_MARK, PORT260_FN3), PINMUX_DATA(SDHID1_2_MARK, PORT261_FN1), \ PINMUX_DATA(TS_SDEN2_MARK, PORT261_FN3), PINMUX_DATA(SDHID1_3_MARK, PORT262_FN1), \ PINMUX_DATA(TS_SCK2_MARK, PORT262_FN3), PINMUX_DATA(SDHICMD1_MARK, PORT263_FN1), PINMUX_DATA(SDHICLK2_MARK, PORT264_FN1), PINMUX_DATA(SDHID2_0_MARK, PORT265_FN1), \ PINMUX_DATA(TS_SPSYNC4_MARK, 
PORT265_FN3), PINMUX_DATA(SDHID2_1_MARK, PORT266_FN1), \ PINMUX_DATA(TS_SDAT4_MARK, PORT266_FN3), PINMUX_DATA(SDHID2_2_MARK, PORT267_FN1), \ PINMUX_DATA(TS_SDEN4_MARK, PORT267_FN3), PINMUX_DATA(SDHID2_3_MARK, PORT268_FN1), \ PINMUX_DATA(TS_SCK4_MARK, PORT268_FN3), PINMUX_DATA(SDHICMD2_MARK, PORT269_FN1), PINMUX_DATA(MMCCLK0_MARK, PORT270_FN1, MSEL4CR_MSEL15_0), PINMUX_DATA(MMCD0_0_MARK, PORT271_FN1, MSEL4CR_MSEL15_0), PINMUX_DATA(MMCD0_1_MARK, PORT272_FN1, MSEL4CR_MSEL15_0), PINMUX_DATA(MMCD0_2_MARK, PORT273_FN1, MSEL4CR_MSEL15_0), PINMUX_DATA(MMCD0_3_MARK, PORT274_FN1, MSEL4CR_MSEL15_0), PINMUX_DATA(MMCD0_4_MARK, PORT275_FN1, MSEL4CR_MSEL15_0), \ PINMUX_DATA(TS_SPSYNC5_MARK, PORT275_FN3), PINMUX_DATA(MMCD0_5_MARK, PORT276_FN1, MSEL4CR_MSEL15_0), \ PINMUX_DATA(TS_SDAT5_MARK, PORT276_FN3), PINMUX_DATA(MMCD0_6_MARK, PORT277_FN1, MSEL4CR_MSEL15_0), \ PINMUX_DATA(TS_SDEN5_MARK, PORT277_FN3), PINMUX_DATA(MMCD0_7_MARK, PORT278_FN1, MSEL4CR_MSEL15_0), \ PINMUX_DATA(TS_SCK5_MARK, PORT278_FN3), PINMUX_DATA(MMCCMD0_MARK, PORT279_FN1, MSEL4CR_MSEL15_0), PINMUX_DATA(RESETOUTS__MARK, PORT281_FN1), \ PINMUX_DATA(EXTAL2OUT_MARK, PORT281_FN2), PINMUX_DATA(MCP_WAIT__MCP_FRB_MARK, PORT288_FN1), PINMUX_DATA(MCP_CKO_MARK, PORT289_FN1), \ PINMUX_DATA(MMCCLK1_MARK, PORT289_FN2, MSEL4CR_MSEL15_1), PINMUX_DATA(MCP_D15_MCP_NAF15_MARK, PORT290_FN1), PINMUX_DATA(MCP_D14_MCP_NAF14_MARK, PORT291_FN1), PINMUX_DATA(MCP_D13_MCP_NAF13_MARK, PORT292_FN1), PINMUX_DATA(MCP_D12_MCP_NAF12_MARK, PORT293_FN1), PINMUX_DATA(MCP_D11_MCP_NAF11_MARK, PORT294_FN1), PINMUX_DATA(MCP_D10_MCP_NAF10_MARK, PORT295_FN1), PINMUX_DATA(MCP_D9_MCP_NAF9_MARK, PORT296_FN1), PINMUX_DATA(MCP_D8_MCP_NAF8_MARK, PORT297_FN1), \ PINMUX_DATA(MMCCMD1_MARK, PORT297_FN2, MSEL4CR_MSEL15_1), PINMUX_DATA(MCP_D7_MCP_NAF7_MARK, PORT298_FN1), \ PINMUX_DATA(MMCD1_7_MARK, PORT298_FN2, MSEL4CR_MSEL15_1), PINMUX_DATA(MCP_D6_MCP_NAF6_MARK, PORT299_FN1), \ PINMUX_DATA(MMCD1_6_MARK, PORT299_FN2, MSEL4CR_MSEL15_1), 
PINMUX_DATA(MCP_D5_MCP_NAF5_MARK, PORT300_FN1), \ PINMUX_DATA(MMCD1_5_MARK, PORT300_FN2, MSEL4CR_MSEL15_1), PINMUX_DATA(MCP_D4_MCP_NAF4_MARK, PORT301_FN1), \ PINMUX_DATA(MMCD1_4_MARK, PORT301_FN2, MSEL4CR_MSEL15_1), PINMUX_DATA(MCP_D3_MCP_NAF3_MARK, PORT302_FN1), \ PINMUX_DATA(MMCD1_3_MARK, PORT302_FN2, MSEL4CR_MSEL15_1), PINMUX_DATA(MCP_D2_MCP_NAF2_MARK, PORT303_FN1), \ PINMUX_DATA(MMCD1_2_MARK, PORT303_FN2, MSEL4CR_MSEL15_1), PINMUX_DATA(MCP_D1_MCP_NAF1_MARK, PORT304_FN1), \ PINMUX_DATA(MMCD1_1_MARK, PORT304_FN2, MSEL4CR_MSEL15_1), PINMUX_DATA(MCP_D0_MCP_NAF0_MARK, PORT305_FN1), \ PINMUX_DATA(MMCD1_0_MARK, PORT305_FN2, MSEL4CR_MSEL15_1), PINMUX_DATA(MCP_NBRSTOUT__MARK, PORT306_FN1), PINMUX_DATA(MCP_WE0__MCP_FWE_MARK, PORT309_FN1), \ PINMUX_DATA(MCP_RDWR_MCP_FWE_MARK, PORT309_FN2), /* MSEL2 special cases */ PINMUX_DATA(TSIF2_TS_XX1_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0, MSEL2CR_MSEL12_0), PINMUX_DATA(TSIF2_TS_XX2_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_0, MSEL2CR_MSEL12_1), PINMUX_DATA(TSIF2_TS_XX3_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1, MSEL2CR_MSEL12_0), PINMUX_DATA(TSIF2_TS_XX4_MARK, MSEL2CR_MSEL14_0, MSEL2CR_MSEL13_1, MSEL2CR_MSEL12_1), PINMUX_DATA(TSIF2_TS_XX5_MARK, MSEL2CR_MSEL14_1, MSEL2CR_MSEL13_0, MSEL2CR_MSEL12_0), PINMUX_DATA(TSIF1_TS_XX1_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0, MSEL2CR_MSEL9_0), PINMUX_DATA(TSIF1_TS_XX2_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_0, MSEL2CR_MSEL9_1), PINMUX_DATA(TSIF1_TS_XX3_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1, MSEL2CR_MSEL9_0), PINMUX_DATA(TSIF1_TS_XX4_MARK, MSEL2CR_MSEL11_0, MSEL2CR_MSEL10_1, MSEL2CR_MSEL9_1), PINMUX_DATA(TSIF1_TS_XX5_MARK, MSEL2CR_MSEL11_1, MSEL2CR_MSEL10_0, MSEL2CR_MSEL9_0), PINMUX_DATA(TSIF0_TS_XX1_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0, MSEL2CR_MSEL6_0), PINMUX_DATA(TSIF0_TS_XX2_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_0, MSEL2CR_MSEL6_1), PINMUX_DATA(TSIF0_TS_XX3_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1, MSEL2CR_MSEL6_0), PINMUX_DATA(TSIF0_TS_XX4_MARK, MSEL2CR_MSEL8_0, MSEL2CR_MSEL7_1, 
MSEL2CR_MSEL6_1), PINMUX_DATA(TSIF0_TS_XX5_MARK, MSEL2CR_MSEL8_1, MSEL2CR_MSEL7_0, MSEL2CR_MSEL6_0), PINMUX_DATA(MST1_TS_XX1_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0, MSEL2CR_MSEL3_0), PINMUX_DATA(MST1_TS_XX2_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_0, MSEL2CR_MSEL3_1), PINMUX_DATA(MST1_TS_XX3_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1, MSEL2CR_MSEL3_0), PINMUX_DATA(MST1_TS_XX4_MARK, MSEL2CR_MSEL5_0, MSEL2CR_MSEL4_1, MSEL2CR_MSEL3_1), PINMUX_DATA(MST1_TS_XX5_MARK, MSEL2CR_MSEL5_1, MSEL2CR_MSEL4_0, MSEL2CR_MSEL3_0), PINMUX_DATA(MST0_TS_XX1_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0, MSEL2CR_MSEL0_0), PINMUX_DATA(MST0_TS_XX2_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_0, MSEL2CR_MSEL0_1), PINMUX_DATA(MST0_TS_XX3_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1, MSEL2CR_MSEL0_0), PINMUX_DATA(MST0_TS_XX4_MARK, MSEL2CR_MSEL2_0, MSEL2CR_MSEL1_1, MSEL2CR_MSEL0_1), PINMUX_DATA(MST0_TS_XX5_MARK, MSEL2CR_MSEL2_1, MSEL2CR_MSEL1_0, MSEL2CR_MSEL0_0), /* MSEL3 special cases */ PINMUX_DATA(SDHI0_VCCQ_MC0_ON_MARK, MSEL3CR_MSEL28_1), PINMUX_DATA(SDHI0_VCCQ_MC0_OFF_MARK, MSEL3CR_MSEL28_0), PINMUX_DATA(DEBUG_MON_VIO_MARK, MSEL3CR_MSEL15_0), PINMUX_DATA(DEBUG_MON_LCDD_MARK, MSEL3CR_MSEL15_1), PINMUX_DATA(LCDC_LCDC0_MARK, MSEL3CR_MSEL6_0), PINMUX_DATA(LCDC_LCDC1_MARK, MSEL3CR_MSEL6_1), /* MSEL4 special cases */ PINMUX_DATA(IRQ9_MEM_INT_MARK, MSEL4CR_MSEL29_0), PINMUX_DATA(IRQ9_MCP_INT_MARK, MSEL4CR_MSEL29_1), PINMUX_DATA(A11_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_0), PINMUX_DATA(KEYOUT8_MARK, MSEL4CR_MSEL13_0, MSEL4CR_MSEL12_1), PINMUX_DATA(TPU4TO3_MARK, MSEL4CR_MSEL13_1, MSEL4CR_MSEL12_0), PINMUX_DATA(RESETA_N_PU_ON_MARK, MSEL4CR_MSEL4_0), PINMUX_DATA(RESETA_N_PU_OFF_MARK, MSEL4CR_MSEL4_1), PINMUX_DATA(EDBGREQ_PD_MARK, MSEL4CR_MSEL1_0), PINMUX_DATA(EDBGREQ_PU_MARK, MSEL4CR_MSEL1_1), /* Functions with pull-ups */ PINMUX_DATA(KEYIN0_PU_MARK, PORT66_FN2, PORT66_IN_PU), PINMUX_DATA(KEYIN1_PU_MARK, PORT67_FN2, PORT67_IN_PU), PINMUX_DATA(KEYIN2_PU_MARK, PORT68_FN2, PORT68_IN_PU), PINMUX_DATA(KEYIN3_PU_MARK, PORT69_FN2, 
PORT69_IN_PU), PINMUX_DATA(KEYIN4_PU_MARK, PORT70_FN2, PORT70_IN_PU), PINMUX_DATA(KEYIN5_PU_MARK, PORT71_FN2, PORT71_IN_PU), PINMUX_DATA(KEYIN6_PU_MARK, PORT72_FN2, PORT72_IN_PU), PINMUX_DATA(KEYIN7_PU_MARK, PORT73_FN2, PORT73_IN_PU), PINMUX_DATA(SDHID1_0_PU_MARK, PORT259_IN_PU, PORT259_FN1), PINMUX_DATA(SDHID1_1_PU_MARK, PORT260_IN_PU, PORT260_FN1), PINMUX_DATA(SDHID1_2_PU_MARK, PORT261_IN_PU, PORT261_FN1), PINMUX_DATA(SDHID1_3_PU_MARK, PORT262_IN_PU, PORT262_FN1), PINMUX_DATA(SDHICMD1_PU_MARK, PORT263_IN_PU, PORT263_FN1), PINMUX_DATA(MMCCMD0_PU_MARK, PORT279_FN1, PORT279_IN_PU, MSEL4CR_MSEL15_0), PINMUX_DATA(MMCCMD1_PU_MARK, PORT297_FN2, PORT279_IN_PU, MSEL4CR_MSEL15_1), PINMUX_DATA(FSIACK_PU_MARK, PORT49_FN1, PORT49_IN_PU), PINMUX_DATA(FSIAILR_PU_MARK, PORT50_FN5, PORT50_IN_PU), PINMUX_DATA(FSIAIBT_PU_MARK, PORT51_FN5, PORT51_IN_PU), PINMUX_DATA(FSIAISLD_PU_MARK, PORT55_FN1, PORT55_IN_PU), }; #define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA) #define GPIO_PORT_310() _310(_GPIO_PORT, , unused) #define GPIO_FN(str) PINMUX_GPIO(GPIO_FN_##str, str##_MARK) static struct pinmux_gpio pinmux_gpios[] = { GPIO_PORT_310(), /* Table 25-1 (Functions 0-7) */ GPIO_FN(VBUS_0), GPIO_FN(GPI0), GPIO_FN(GPI1), GPIO_FN(GPI2), GPIO_FN(GPI3), GPIO_FN(GPI4), GPIO_FN(GPI5), GPIO_FN(GPI6), GPIO_FN(GPI7), GPIO_FN(SCIFA7_RXD), GPIO_FN(SCIFA7_CTS_), GPIO_FN(GPO7), \ GPIO_FN(MFG0_OUT2), GPIO_FN(GPO6), \ GPIO_FN(MFG1_OUT2), GPIO_FN(GPO5), \ GPIO_FN(SCIFA0_SCK), \ GPIO_FN(FSICOSLDT3), \ GPIO_FN(PORT16_VIO_CKOR), GPIO_FN(SCIFA0_TXD), GPIO_FN(SCIFA7_TXD), GPIO_FN(SCIFA7_RTS_), \ GPIO_FN(PORT19_VIO_CKO2), GPIO_FN(GPO0), GPIO_FN(GPO1), GPIO_FN(GPO2), \ GPIO_FN(STATUS0), GPIO_FN(GPO3), \ GPIO_FN(STATUS1), GPIO_FN(GPO4), \ GPIO_FN(STATUS2), GPIO_FN(VINT), GPIO_FN(TCKON), GPIO_FN(XDVFS1), \ GPIO_FN(PORT27_I2C_SCL2), \ GPIO_FN(PORT27_I2C_SCL3), \ GPIO_FN(MFG0_OUT1), \ GPIO_FN(PORT27_IROUT), GPIO_FN(XDVFS2), \ GPIO_FN(PORT28_I2C_SDA2), \ GPIO_FN(PORT28_I2C_SDA3), \ 
GPIO_FN(PORT28_TPU1TO1), GPIO_FN(SIM_RST), \ GPIO_FN(PORT29_TPU1TO1), GPIO_FN(SIM_CLK), \ GPIO_FN(PORT30_VIO_CKOR), GPIO_FN(SIM_D), \ GPIO_FN(PORT31_IROUT), GPIO_FN(SCIFA4_TXD), GPIO_FN(SCIFA4_RXD), \ GPIO_FN(XWUP), GPIO_FN(SCIFA4_RTS_), GPIO_FN(SCIFA4_CTS_), GPIO_FN(FSIBOBT), \ GPIO_FN(FSIBIBT), GPIO_FN(FSIBOLR), \ GPIO_FN(FSIBILR), GPIO_FN(FSIBOSLD), GPIO_FN(FSIBISLD), GPIO_FN(VACK), GPIO_FN(XTAL1L), GPIO_FN(SCIFA0_RTS_), \ GPIO_FN(FSICOSLDT2), GPIO_FN(SCIFA0_RXD), GPIO_FN(SCIFA0_CTS_), \ GPIO_FN(FSICOSLDT1), GPIO_FN(FSICOBT), \ GPIO_FN(FSICIBT), \ GPIO_FN(FSIDOBT), \ GPIO_FN(FSIDIBT), GPIO_FN(FSICOLR), \ GPIO_FN(FSICILR), \ GPIO_FN(FSIDOLR), \ GPIO_FN(FSIDILR), GPIO_FN(FSICOSLD), \ GPIO_FN(PORT47_FSICSPDIF), GPIO_FN(FSICISLD), \ GPIO_FN(FSIDISLD), GPIO_FN(FSIACK), \ GPIO_FN(PORT49_IRDA_OUT), \ GPIO_FN(PORT49_IROUT), \ GPIO_FN(FSIAOMC), GPIO_FN(FSIAOLR), \ GPIO_FN(BBIF2_TSYNC2), \ GPIO_FN(TPU2TO2), \ GPIO_FN(FSIAILR), GPIO_FN(FSIAOBT), \ GPIO_FN(BBIF2_TSCK2), \ GPIO_FN(TPU2TO3), \ GPIO_FN(FSIAIBT), GPIO_FN(FSIAOSLD), \ GPIO_FN(BBIF2_TXD2), GPIO_FN(FSIASPDIF), \ GPIO_FN(PORT53_IRDA_IN), \ GPIO_FN(TPU3TO3), \ GPIO_FN(FSIBSPDIF), \ GPIO_FN(PORT53_FSICSPDIF), GPIO_FN(FSIBCK), \ GPIO_FN(PORT54_IRDA_FIRSEL), \ GPIO_FN(TPU3TO2), \ GPIO_FN(FSIBOMC), \ GPIO_FN(FSICCK), \ GPIO_FN(FSICOMC), GPIO_FN(FSIAISLD), \ GPIO_FN(TPU0TO0), GPIO_FN(A0), \ GPIO_FN(BS_), GPIO_FN(A12), \ GPIO_FN(PORT58_KEYOUT7), \ GPIO_FN(TPU4TO2), GPIO_FN(A13), \ GPIO_FN(PORT59_KEYOUT6), \ GPIO_FN(TPU0TO1), GPIO_FN(A14), \ GPIO_FN(KEYOUT5), GPIO_FN(A15), \ GPIO_FN(KEYOUT4), GPIO_FN(A16), \ GPIO_FN(KEYOUT3), \ GPIO_FN(MSIOF0_SS1), GPIO_FN(A17), \ GPIO_FN(KEYOUT2), \ GPIO_FN(MSIOF0_TSYNC), GPIO_FN(A18), \ GPIO_FN(KEYOUT1), \ GPIO_FN(MSIOF0_TSCK), GPIO_FN(A19), \ GPIO_FN(KEYOUT0), \ GPIO_FN(MSIOF0_TXD), GPIO_FN(A20), \ GPIO_FN(KEYIN0), \ GPIO_FN(MSIOF0_RSCK), GPIO_FN(A21), \ GPIO_FN(KEYIN1), \ GPIO_FN(MSIOF0_RSYNC), GPIO_FN(A22), \ GPIO_FN(KEYIN2), \ GPIO_FN(MSIOF0_MCK0), GPIO_FN(A23), \ GPIO_FN(KEYIN3), \ 
GPIO_FN(MSIOF0_MCK1), GPIO_FN(A24), \ GPIO_FN(KEYIN4), \ GPIO_FN(MSIOF0_RXD), GPIO_FN(A25), \ GPIO_FN(KEYIN5), \ GPIO_FN(MSIOF0_SS2), GPIO_FN(A26), \ GPIO_FN(KEYIN6), GPIO_FN(KEYIN7), GPIO_FN(D0_NAF0), GPIO_FN(D1_NAF1), GPIO_FN(D2_NAF2), GPIO_FN(D3_NAF3), GPIO_FN(D4_NAF4), GPIO_FN(D5_NAF5), GPIO_FN(D6_NAF6), GPIO_FN(D7_NAF7), GPIO_FN(D8_NAF8), GPIO_FN(D9_NAF9), GPIO_FN(D10_NAF10), GPIO_FN(D11_NAF11), GPIO_FN(D12_NAF12), GPIO_FN(D13_NAF13), GPIO_FN(D14_NAF14), GPIO_FN(D15_NAF15), GPIO_FN(CS4_), GPIO_FN(CS5A_), \ GPIO_FN(PORT91_RDWR), GPIO_FN(CS5B_), \ GPIO_FN(FCE1_), GPIO_FN(CS6B_), \ GPIO_FN(DACK0), GPIO_FN(FCE0_), \ GPIO_FN(CS6A_), GPIO_FN(WAIT_), \ GPIO_FN(DREQ0), GPIO_FN(RD__FSC), GPIO_FN(WE0__FWE), \ GPIO_FN(RDWR_FWE), GPIO_FN(WE1_), GPIO_FN(FRB), GPIO_FN(CKO), GPIO_FN(NBRSTOUT_), GPIO_FN(NBRST_), GPIO_FN(BBIF2_TXD), GPIO_FN(BBIF2_RXD), GPIO_FN(BBIF2_SYNC), GPIO_FN(BBIF2_SCK), GPIO_FN(SCIFA3_CTS_), \ GPIO_FN(MFG3_IN2), GPIO_FN(SCIFA3_RXD), \ GPIO_FN(MFG3_IN1), GPIO_FN(BBIF1_SS2), \ GPIO_FN(SCIFA3_RTS_), \ GPIO_FN(MFG3_OUT1), GPIO_FN(SCIFA3_TXD), GPIO_FN(HSI_RX_DATA), \ GPIO_FN(BBIF1_RXD), GPIO_FN(HSI_TX_WAKE), \ GPIO_FN(BBIF1_TSCK), GPIO_FN(HSI_TX_DATA), \ GPIO_FN(BBIF1_TSYNC), GPIO_FN(HSI_TX_READY), \ GPIO_FN(BBIF1_TXD), GPIO_FN(HSI_RX_READY), \ GPIO_FN(BBIF1_RSCK), \ GPIO_FN(PORT115_I2C_SCL2), \ GPIO_FN(PORT115_I2C_SCL3), GPIO_FN(HSI_RX_WAKE), \ GPIO_FN(BBIF1_RSYNC), \ GPIO_FN(PORT116_I2C_SDA2), \ GPIO_FN(PORT116_I2C_SDA3), GPIO_FN(HSI_RX_FLAG), \ GPIO_FN(BBIF1_SS1), \ GPIO_FN(BBIF1_FLOW), GPIO_FN(HSI_TX_FLAG), GPIO_FN(VIO_VD), \ GPIO_FN(PORT128_LCD2VSYN), \ GPIO_FN(VIO2_VD), \ GPIO_FN(LCD2D0), GPIO_FN(VIO_HD), \ GPIO_FN(PORT129_LCD2HSYN), \ GPIO_FN(PORT129_LCD2CS_), \ GPIO_FN(VIO2_HD), \ GPIO_FN(LCD2D1), GPIO_FN(VIO_D0), \ GPIO_FN(PORT130_MSIOF2_RXD), \ GPIO_FN(LCD2D10), GPIO_FN(VIO_D1), \ GPIO_FN(PORT131_KEYOUT6), \ GPIO_FN(PORT131_MSIOF2_SS1), \ GPIO_FN(PORT131_KEYOUT11), \ GPIO_FN(LCD2D11), GPIO_FN(VIO_D2), \ GPIO_FN(PORT132_KEYOUT7), \ 
GPIO_FN(PORT132_MSIOF2_SS2), \ GPIO_FN(PORT132_KEYOUT10), \ GPIO_FN(LCD2D12), GPIO_FN(VIO_D3), \ GPIO_FN(MSIOF2_TSYNC), \ GPIO_FN(LCD2D13), GPIO_FN(VIO_D4), \ GPIO_FN(MSIOF2_TXD), \ GPIO_FN(LCD2D14), GPIO_FN(VIO_D5), \ GPIO_FN(MSIOF2_TSCK), \ GPIO_FN(LCD2D15), GPIO_FN(VIO_D6), \ GPIO_FN(PORT136_KEYOUT8), \ GPIO_FN(LCD2D16), GPIO_FN(VIO_D7), \ GPIO_FN(PORT137_KEYOUT9), \ GPIO_FN(LCD2D17), GPIO_FN(VIO_D8), \ GPIO_FN(PORT138_KEYOUT8), \ GPIO_FN(VIO2_D0), \ GPIO_FN(LCD2D6), GPIO_FN(VIO_D9), \ GPIO_FN(PORT139_KEYOUT9), \ GPIO_FN(VIO2_D1), \ GPIO_FN(LCD2D7), GPIO_FN(VIO_D10), \ GPIO_FN(TPU0TO2), \ GPIO_FN(VIO2_D2), \ GPIO_FN(LCD2D8), GPIO_FN(VIO_D11), \ GPIO_FN(TPU0TO3), \ GPIO_FN(VIO2_D3), \ GPIO_FN(LCD2D9), GPIO_FN(VIO_D12), \ GPIO_FN(PORT142_KEYOUT10), \ GPIO_FN(VIO2_D4), \ GPIO_FN(LCD2D2), GPIO_FN(VIO_D13), \ GPIO_FN(PORT143_KEYOUT11), \ GPIO_FN(PORT143_KEYOUT6), \ GPIO_FN(VIO2_D5), \ GPIO_FN(LCD2D3), GPIO_FN(VIO_D14), \ GPIO_FN(PORT144_KEYOUT7), \ GPIO_FN(VIO2_D6), \ GPIO_FN(LCD2D4), GPIO_FN(VIO_D15), \ GPIO_FN(TPU1TO3), \ GPIO_FN(PORT145_LCD2DISP), \ GPIO_FN(PORT145_LCD2RS), \ GPIO_FN(VIO2_D7), \ GPIO_FN(LCD2D5), GPIO_FN(VIO_CLK), \ GPIO_FN(LCD2DCK), \ GPIO_FN(PORT146_LCD2WR_), \ GPIO_FN(VIO2_CLK), \ GPIO_FN(LCD2D18), GPIO_FN(VIO_FIELD), \ GPIO_FN(LCD2RD_), \ GPIO_FN(VIO2_FIELD), \ GPIO_FN(LCD2D19), GPIO_FN(VIO_CKO), GPIO_FN(A27), \ GPIO_FN(PORT149_RDWR), \ GPIO_FN(MFG0_IN1), \ GPIO_FN(PORT149_KEYOUT9), GPIO_FN(MFG0_IN2), GPIO_FN(TS_SPSYNC3), \ GPIO_FN(MSIOF2_RSCK), GPIO_FN(TS_SDAT3), \ GPIO_FN(MSIOF2_RSYNC), GPIO_FN(TPU1TO2), \ GPIO_FN(TS_SDEN3), \ GPIO_FN(PORT153_MSIOF2_SS1), GPIO_FN(SCIFA2_TXD1), \ GPIO_FN(MSIOF2_MCK0), GPIO_FN(SCIFA2_RXD1), \ GPIO_FN(MSIOF2_MCK1), GPIO_FN(SCIFA2_RTS1_), \ GPIO_FN(PORT156_MSIOF2_SS2), GPIO_FN(SCIFA2_CTS1_), \ GPIO_FN(PORT157_MSIOF2_RXD), GPIO_FN(DINT_), \ GPIO_FN(SCIFA2_SCK1), \ GPIO_FN(TS_SCK3), GPIO_FN(PORT159_SCIFB_SCK), \ GPIO_FN(PORT159_SCIFA5_SCK), \ GPIO_FN(NMI), GPIO_FN(PORT160_SCIFB_TXD), \ GPIO_FN(PORT160_SCIFA5_TXD), 
GPIO_FN(PORT161_SCIFB_CTS_), \ GPIO_FN(PORT161_SCIFA5_CTS_), GPIO_FN(PORT162_SCIFB_RXD), \ GPIO_FN(PORT162_SCIFA5_RXD), GPIO_FN(PORT163_SCIFB_RTS_), \ GPIO_FN(PORT163_SCIFA5_RTS_), \ GPIO_FN(TPU3TO0), GPIO_FN(LCDD0), GPIO_FN(LCDD1), \ GPIO_FN(PORT193_SCIFA5_CTS_), \ GPIO_FN(BBIF2_TSYNC1), GPIO_FN(LCDD2), \ GPIO_FN(PORT194_SCIFA5_RTS_), \ GPIO_FN(BBIF2_TSCK1), GPIO_FN(LCDD3), \ GPIO_FN(PORT195_SCIFA5_RXD), \ GPIO_FN(BBIF2_TXD1), GPIO_FN(LCDD4), \ GPIO_FN(PORT196_SCIFA5_TXD), GPIO_FN(LCDD5), \ GPIO_FN(PORT197_SCIFA5_SCK), \ GPIO_FN(MFG2_OUT2), \ GPIO_FN(TPU2TO1), GPIO_FN(LCDD6), GPIO_FN(LCDD7), \ GPIO_FN(TPU4TO1), \ GPIO_FN(MFG4_OUT2), GPIO_FN(LCDD8), \ GPIO_FN(D16), GPIO_FN(LCDD9), \ GPIO_FN(D17), GPIO_FN(LCDD10), \ GPIO_FN(D18), GPIO_FN(LCDD11), \ GPIO_FN(D19), GPIO_FN(LCDD12), \ GPIO_FN(D20), GPIO_FN(LCDD13), \ GPIO_FN(D21), GPIO_FN(LCDD14), \ GPIO_FN(D22), GPIO_FN(LCDD15), \ GPIO_FN(PORT207_MSIOF0L_SS1), \ GPIO_FN(D23), GPIO_FN(LCDD16), \ GPIO_FN(PORT208_MSIOF0L_SS2), \ GPIO_FN(D24), GPIO_FN(LCDD17), \ GPIO_FN(D25), GPIO_FN(LCDD18), \ GPIO_FN(DREQ2), \ GPIO_FN(PORT210_MSIOF0L_SS1), \ GPIO_FN(D26), GPIO_FN(LCDD19), \ GPIO_FN(PORT211_MSIOF0L_SS2), \ GPIO_FN(D27), GPIO_FN(LCDD20), \ GPIO_FN(TS_SPSYNC1), \ GPIO_FN(MSIOF0L_MCK0), \ GPIO_FN(D28), GPIO_FN(LCDD21), \ GPIO_FN(TS_SDAT1), \ GPIO_FN(MSIOF0L_MCK1), \ GPIO_FN(D29), GPIO_FN(LCDD22), \ GPIO_FN(TS_SDEN1), \ GPIO_FN(MSIOF0L_RSCK), \ GPIO_FN(D30), GPIO_FN(LCDD23), \ GPIO_FN(TS_SCK1), \ GPIO_FN(MSIOF0L_RSYNC), \ GPIO_FN(D31), GPIO_FN(LCDDCK), \ GPIO_FN(LCDWR_), GPIO_FN(LCDRD_), \ GPIO_FN(DACK2), \ GPIO_FN(PORT217_LCD2RS), \ GPIO_FN(MSIOF0L_TSYNC), \ GPIO_FN(VIO2_FIELD3), \ GPIO_FN(PORT217_LCD2DISP), GPIO_FN(LCDHSYN), \ GPIO_FN(LCDCS_), \ GPIO_FN(LCDCS2_), \ GPIO_FN(DACK3), \ GPIO_FN(PORT218_VIO_CKOR), GPIO_FN(LCDDISP), \ GPIO_FN(LCDRS), \ GPIO_FN(PORT219_LCD2WR_), \ GPIO_FN(DREQ3), \ GPIO_FN(MSIOF0L_TSCK), \ GPIO_FN(VIO2_CLK3), \ GPIO_FN(LCD2DCK_2), GPIO_FN(LCDVSYN), \ GPIO_FN(LCDVSYN2), GPIO_FN(LCDLCLK), \ 
GPIO_FN(DREQ1), \ GPIO_FN(PORT221_LCD2CS_), \ GPIO_FN(PWEN), \ GPIO_FN(MSIOF0L_RXD), \ GPIO_FN(VIO2_HD3), \ GPIO_FN(PORT221_LCD2HSYN), GPIO_FN(LCDDON), \ GPIO_FN(LCDDON2), \ GPIO_FN(DACK1), \ GPIO_FN(OVCN), \ GPIO_FN(MSIOF0L_TXD), \ GPIO_FN(VIO2_VD3), \ GPIO_FN(PORT222_LCD2VSYN), GPIO_FN(SCIFA1_TXD), \ GPIO_FN(OVCN2), GPIO_FN(EXTLP), \ GPIO_FN(SCIFA1_SCK), \ GPIO_FN(PORT226_VIO_CKO2), GPIO_FN(SCIFA1_RTS_), \ GPIO_FN(IDIN), GPIO_FN(SCIFA1_RXD), GPIO_FN(SCIFA1_CTS_), \ GPIO_FN(MFG1_IN1), GPIO_FN(MSIOF1_TXD), \ GPIO_FN(SCIFA2_TXD2), GPIO_FN(MSIOF1_TSYNC), \ GPIO_FN(SCIFA2_CTS2_), GPIO_FN(MSIOF1_TSCK), \ GPIO_FN(SCIFA2_SCK2), GPIO_FN(MSIOF1_RXD), \ GPIO_FN(SCIFA2_RXD2), GPIO_FN(MSIOF1_RSCK), \ GPIO_FN(SCIFA2_RTS2_), \ GPIO_FN(VIO2_CLK2), \ GPIO_FN(LCD2D20), GPIO_FN(MSIOF1_RSYNC), \ GPIO_FN(MFG1_IN2), \ GPIO_FN(VIO2_VD2), \ GPIO_FN(LCD2D21), GPIO_FN(MSIOF1_MCK0), \ GPIO_FN(PORT236_I2C_SDA2), GPIO_FN(MSIOF1_MCK1), \ GPIO_FN(PORT237_I2C_SCL2), GPIO_FN(MSIOF1_SS1), \ GPIO_FN(VIO2_FIELD2), \ GPIO_FN(LCD2D22), GPIO_FN(MSIOF1_SS2), \ GPIO_FN(VIO2_HD2), \ GPIO_FN(LCD2D23), GPIO_FN(SCIFA6_TXD), GPIO_FN(PORT241_IRDA_OUT), \ GPIO_FN(PORT241_IROUT), \ GPIO_FN(MFG4_OUT1), \ GPIO_FN(TPU4TO0), GPIO_FN(PORT242_IRDA_IN), \ GPIO_FN(MFG4_IN2), GPIO_FN(PORT243_IRDA_FIRSEL), \ GPIO_FN(PORT243_VIO_CKO2), GPIO_FN(PORT244_SCIFA5_CTS_), \ GPIO_FN(MFG2_IN1), \ GPIO_FN(PORT244_SCIFB_CTS_), \ GPIO_FN(MSIOF2R_RXD), GPIO_FN(PORT245_SCIFA5_RTS_), \ GPIO_FN(MFG2_IN2), \ GPIO_FN(PORT245_SCIFB_RTS_), \ GPIO_FN(MSIOF2R_TXD), GPIO_FN(PORT246_SCIFA5_RXD), \ GPIO_FN(MFG1_OUT1), \ GPIO_FN(PORT246_SCIFB_RXD), \ GPIO_FN(TPU1TO0), GPIO_FN(PORT247_SCIFA5_TXD), \ GPIO_FN(MFG3_OUT2), \ GPIO_FN(PORT247_SCIFB_TXD), \ GPIO_FN(TPU3TO1), GPIO_FN(PORT248_SCIFA5_SCK), \ GPIO_FN(MFG2_OUT1), \ GPIO_FN(PORT248_SCIFB_SCK), \ GPIO_FN(TPU2TO0), \ GPIO_FN(PORT248_I2C_SCL3), \ GPIO_FN(MSIOF2R_TSCK), GPIO_FN(PORT249_IROUT), \ GPIO_FN(MFG4_IN1), \ GPIO_FN(PORT249_I2C_SDA3), \ GPIO_FN(MSIOF2R_TSYNC), GPIO_FN(SDHICLK0), 
GPIO_FN(SDHICD0), GPIO_FN(SDHID0_0), GPIO_FN(SDHID0_1), GPIO_FN(SDHID0_2), GPIO_FN(SDHID0_3), GPIO_FN(SDHICMD0), GPIO_FN(SDHIWP0), GPIO_FN(SDHICLK1), GPIO_FN(SDHID1_0), \ GPIO_FN(TS_SPSYNC2), GPIO_FN(SDHID1_1), \ GPIO_FN(TS_SDAT2), GPIO_FN(SDHID1_2), \ GPIO_FN(TS_SDEN2), GPIO_FN(SDHID1_3), \ GPIO_FN(TS_SCK2), GPIO_FN(SDHICMD1), GPIO_FN(SDHICLK2), GPIO_FN(SDHID2_0), \ GPIO_FN(TS_SPSYNC4), GPIO_FN(SDHID2_1), \ GPIO_FN(TS_SDAT4), GPIO_FN(SDHID2_2), \ GPIO_FN(TS_SDEN4), GPIO_FN(SDHID2_3), \ GPIO_FN(TS_SCK4), GPIO_FN(SDHICMD2), GPIO_FN(MMCCLK0), GPIO_FN(MMCD0_0), GPIO_FN(MMCD0_1), GPIO_FN(MMCD0_2), GPIO_FN(MMCD0_3), GPIO_FN(MMCD0_4), \ GPIO_FN(TS_SPSYNC5), GPIO_FN(MMCD0_5), \ GPIO_FN(TS_SDAT5), GPIO_FN(MMCD0_6), \ GPIO_FN(TS_SDEN5), GPIO_FN(MMCD0_7), \ GPIO_FN(TS_SCK5), GPIO_FN(MMCCMD0), GPIO_FN(RESETOUTS_), \ GPIO_FN(EXTAL2OUT), GPIO_FN(MCP_WAIT__MCP_FRB), GPIO_FN(MCP_CKO), \ GPIO_FN(MMCCLK1), GPIO_FN(MCP_D15_MCP_NAF15), GPIO_FN(MCP_D14_MCP_NAF14), GPIO_FN(MCP_D13_MCP_NAF13), GPIO_FN(MCP_D12_MCP_NAF12), GPIO_FN(MCP_D11_MCP_NAF11), GPIO_FN(MCP_D10_MCP_NAF10), GPIO_FN(MCP_D9_MCP_NAF9), GPIO_FN(MCP_D8_MCP_NAF8), \ GPIO_FN(MMCCMD1), GPIO_FN(MCP_D7_MCP_NAF7), \ GPIO_FN(MMCD1_7), GPIO_FN(MCP_D6_MCP_NAF6), \ GPIO_FN(MMCD1_6), GPIO_FN(MCP_D5_MCP_NAF5), \ GPIO_FN(MMCD1_5), GPIO_FN(MCP_D4_MCP_NAF4), \ GPIO_FN(MMCD1_4), GPIO_FN(MCP_D3_MCP_NAF3), \ GPIO_FN(MMCD1_3), GPIO_FN(MCP_D2_MCP_NAF2), \ GPIO_FN(MMCD1_2), GPIO_FN(MCP_D1_MCP_NAF1), \ GPIO_FN(MMCD1_1), GPIO_FN(MCP_D0_MCP_NAF0), \ GPIO_FN(MMCD1_0), GPIO_FN(MCP_NBRSTOUT_), GPIO_FN(MCP_WE0__MCP_FWE), \ GPIO_FN(MCP_RDWR_MCP_FWE), /* MSEL2 special cases */ GPIO_FN(TSIF2_TS_XX1), GPIO_FN(TSIF2_TS_XX2), GPIO_FN(TSIF2_TS_XX3), GPIO_FN(TSIF2_TS_XX4), GPIO_FN(TSIF2_TS_XX5), GPIO_FN(TSIF1_TS_XX1), GPIO_FN(TSIF1_TS_XX2), GPIO_FN(TSIF1_TS_XX3), GPIO_FN(TSIF1_TS_XX4), GPIO_FN(TSIF1_TS_XX5), GPIO_FN(TSIF0_TS_XX1), GPIO_FN(TSIF0_TS_XX2), GPIO_FN(TSIF0_TS_XX3), GPIO_FN(TSIF0_TS_XX4), GPIO_FN(TSIF0_TS_XX5), GPIO_FN(MST1_TS_XX1), 
GPIO_FN(MST1_TS_XX2), GPIO_FN(MST1_TS_XX3), GPIO_FN(MST1_TS_XX4), GPIO_FN(MST1_TS_XX5), GPIO_FN(MST0_TS_XX1), GPIO_FN(MST0_TS_XX2), GPIO_FN(MST0_TS_XX3), GPIO_FN(MST0_TS_XX4), GPIO_FN(MST0_TS_XX5), /* MSEL3 special cases */ GPIO_FN(SDHI0_VCCQ_MC0_ON), GPIO_FN(SDHI0_VCCQ_MC0_OFF), GPIO_FN(DEBUG_MON_VIO), GPIO_FN(DEBUG_MON_LCDD), GPIO_FN(LCDC_LCDC0), GPIO_FN(LCDC_LCDC1), /* MSEL4 special cases */ GPIO_FN(IRQ9_MEM_INT), GPIO_FN(IRQ9_MCP_INT), GPIO_FN(A11), GPIO_FN(KEYOUT8), GPIO_FN(TPU4TO3), GPIO_FN(RESETA_N_PU_ON), GPIO_FN(RESETA_N_PU_OFF), GPIO_FN(EDBGREQ_PD), GPIO_FN(EDBGREQ_PU), /* Functions with pull-ups */ GPIO_FN(KEYIN0_PU), GPIO_FN(KEYIN1_PU), GPIO_FN(KEYIN2_PU), GPIO_FN(KEYIN3_PU), GPIO_FN(KEYIN4_PU), GPIO_FN(KEYIN5_PU), GPIO_FN(KEYIN6_PU), GPIO_FN(KEYIN7_PU), GPIO_FN(SDHID1_0_PU), GPIO_FN(SDHID1_1_PU), GPIO_FN(SDHID1_2_PU), GPIO_FN(SDHID1_3_PU), GPIO_FN(SDHICMD1_PU), GPIO_FN(MMCCMD0_PU), GPIO_FN(MMCCMD1_PU), GPIO_FN(FSIACK_PU), GPIO_FN(FSIAILR_PU), GPIO_FN(FSIAIBT_PU), GPIO_FN(FSIAISLD_PU), }; #define PORTCR(nr, reg) \ { PINMUX_CFG_REG("PORT" nr "CR", reg, 8, 4) { \ 0, \ /*0001*/ PORT##nr##_OUT , \ /*0010*/ PORT##nr##_IN , 0, 0, 0, 0, 0, 0, 0, \ /*1010*/ PORT##nr##_IN_PD, 0, 0, 0, \ /*1110*/ PORT##nr##_IN_PU, 0, \ PORT##nr##_FN0, PORT##nr##_FN1, PORT##nr##_FN2, \ PORT##nr##_FN3, PORT##nr##_FN4, PORT##nr##_FN5, \ PORT##nr##_FN6, PORT##nr##_FN7, 0, 0, 0, 0, 0, 0, 0, 0 } \ } static struct pinmux_cfg_reg pinmux_config_regs[] = { PORTCR(0, 0xe6050000), /* PORT0CR */ PORTCR(1, 0xe6050001), /* PORT1CR */ PORTCR(2, 0xe6050002), /* PORT2CR */ PORTCR(3, 0xe6050003), /* PORT3CR */ PORTCR(4, 0xe6050004), /* PORT4CR */ PORTCR(5, 0xe6050005), /* PORT5CR */ PORTCR(6, 0xe6050006), /* PORT6CR */ PORTCR(7, 0xe6050007), /* PORT7CR */ PORTCR(8, 0xe6050008), /* PORT8CR */ PORTCR(9, 0xe6050009), /* PORT9CR */ PORTCR(10, 0xe605000a), /* PORT10CR */ PORTCR(11, 0xe605000b), /* PORT11CR */ PORTCR(12, 0xe605000c), /* PORT12CR */ PORTCR(13, 0xe605000d), /* PORT13CR */ PORTCR(14, 
0xe605000e), /* PORT14CR */ PORTCR(15, 0xe605000f), /* PORT15CR */ PORTCR(16, 0xe6050010), /* PORT16CR */ PORTCR(17, 0xe6050011), /* PORT17CR */ PORTCR(18, 0xe6050012), /* PORT18CR */ PORTCR(19, 0xe6050013), /* PORT19CR */ PORTCR(20, 0xe6050014), /* PORT20CR */ PORTCR(21, 0xe6050015), /* PORT21CR */ PORTCR(22, 0xe6050016), /* PORT22CR */ PORTCR(23, 0xe6050017), /* PORT23CR */ PORTCR(24, 0xe6050018), /* PORT24CR */ PORTCR(25, 0xe6050019), /* PORT25CR */ PORTCR(26, 0xe605001a), /* PORT26CR */ PORTCR(27, 0xe605001b), /* PORT27CR */ PORTCR(28, 0xe605001c), /* PORT28CR */ PORTCR(29, 0xe605001d), /* PORT29CR */ PORTCR(30, 0xe605001e), /* PORT30CR */ PORTCR(31, 0xe605001f), /* PORT31CR */ PORTCR(32, 0xe6051020), /* PORT32CR */ PORTCR(33, 0xe6051021), /* PORT33CR */ PORTCR(34, 0xe6051022), /* PORT34CR */ PORTCR(35, 0xe6051023), /* PORT35CR */ PORTCR(36, 0xe6051024), /* PORT36CR */ PORTCR(37, 0xe6051025), /* PORT37CR */ PORTCR(38, 0xe6051026), /* PORT38CR */ PORTCR(39, 0xe6051027), /* PORT39CR */ PORTCR(40, 0xe6051028), /* PORT40CR */ PORTCR(41, 0xe6051029), /* PORT41CR */ PORTCR(42, 0xe605102a), /* PORT42CR */ PORTCR(43, 0xe605102b), /* PORT43CR */ PORTCR(44, 0xe605102c), /* PORT44CR */ PORTCR(45, 0xe605102d), /* PORT45CR */ PORTCR(46, 0xe605102e), /* PORT46CR */ PORTCR(47, 0xe605102f), /* PORT47CR */ PORTCR(48, 0xe6051030), /* PORT48CR */ PORTCR(49, 0xe6051031), /* PORT49CR */ PORTCR(50, 0xe6051032), /* PORT50CR */ PORTCR(51, 0xe6051033), /* PORT51CR */ PORTCR(52, 0xe6051034), /* PORT52CR */ PORTCR(53, 0xe6051035), /* PORT53CR */ PORTCR(54, 0xe6051036), /* PORT54CR */ PORTCR(55, 0xe6051037), /* PORT55CR */ PORTCR(56, 0xe6051038), /* PORT56CR */ PORTCR(57, 0xe6051039), /* PORT57CR */ PORTCR(58, 0xe605103a), /* PORT58CR */ PORTCR(59, 0xe605103b), /* PORT59CR */ PORTCR(60, 0xe605103c), /* PORT60CR */ PORTCR(61, 0xe605103d), /* PORT61CR */ PORTCR(62, 0xe605103e), /* PORT62CR */ PORTCR(63, 0xe605103f), /* PORT63CR */ PORTCR(64, 0xe6051040), /* PORT64CR */ PORTCR(65, 
0xe6051041), /* PORT65CR */ PORTCR(66, 0xe6051042), /* PORT66CR */ PORTCR(67, 0xe6051043), /* PORT67CR */ PORTCR(68, 0xe6051044), /* PORT68CR */ PORTCR(69, 0xe6051045), /* PORT69CR */ PORTCR(70, 0xe6051046), /* PORT70CR */ PORTCR(71, 0xe6051047), /* PORT71CR */ PORTCR(72, 0xe6051048), /* PORT72CR */ PORTCR(73, 0xe6051049), /* PORT73CR */ PORTCR(74, 0xe605104a), /* PORT74CR */ PORTCR(75, 0xe605104b), /* PORT75CR */ PORTCR(76, 0xe605104c), /* PORT76CR */ PORTCR(77, 0xe605104d), /* PORT77CR */ PORTCR(78, 0xe605104e), /* PORT78CR */ PORTCR(79, 0xe605104f), /* PORT79CR */ PORTCR(80, 0xe6051050), /* PORT80CR */ PORTCR(81, 0xe6051051), /* PORT81CR */ PORTCR(82, 0xe6051052), /* PORT82CR */ PORTCR(83, 0xe6051053), /* PORT83CR */ PORTCR(84, 0xe6051054), /* PORT84CR */ PORTCR(85, 0xe6051055), /* PORT85CR */ PORTCR(86, 0xe6051056), /* PORT86CR */ PORTCR(87, 0xe6051057), /* PORT87CR */ PORTCR(88, 0xe6051058), /* PORT88CR */ PORTCR(89, 0xe6051059), /* PORT89CR */ PORTCR(90, 0xe605105a), /* PORT90CR */ PORTCR(91, 0xe605105b), /* PORT91CR */ PORTCR(92, 0xe605105c), /* PORT92CR */ PORTCR(93, 0xe605105d), /* PORT93CR */ PORTCR(94, 0xe605105e), /* PORT94CR */ PORTCR(95, 0xe605105f), /* PORT95CR */ PORTCR(96, 0xe6052060), /* PORT96CR */ PORTCR(97, 0xe6052061), /* PORT97CR */ PORTCR(98, 0xe6052062), /* PORT98CR */ PORTCR(99, 0xe6052063), /* PORT99CR */ PORTCR(100, 0xe6052064), /* PORT100CR */ PORTCR(101, 0xe6052065), /* PORT101CR */ PORTCR(102, 0xe6052066), /* PORT102CR */ PORTCR(103, 0xe6052067), /* PORT103CR */ PORTCR(104, 0xe6052068), /* PORT104CR */ PORTCR(105, 0xe6052069), /* PORT105CR */ PORTCR(106, 0xe605206a), /* PORT106CR */ PORTCR(107, 0xe605206b), /* PORT107CR */ PORTCR(108, 0xe605206c), /* PORT108CR */ PORTCR(109, 0xe605206d), /* PORT109CR */ PORTCR(110, 0xe605206e), /* PORT110CR */ PORTCR(111, 0xe605206f), /* PORT111CR */ PORTCR(112, 0xe6052070), /* PORT112CR */ PORTCR(113, 0xe6052071), /* PORT113CR */ PORTCR(114, 0xe6052072), /* PORT114CR */ PORTCR(115, 0xe6052073), /* 
PORT115CR */ PORTCR(116, 0xe6052074), /* PORT116CR */ PORTCR(117, 0xe6052075), /* PORT117CR */ PORTCR(118, 0xe6052076), /* PORT118CR */ PORTCR(128, 0xe6052080), /* PORT128CR */ PORTCR(129, 0xe6052081), /* PORT129CR */ PORTCR(130, 0xe6052082), /* PORT130CR */ PORTCR(131, 0xe6052083), /* PORT131CR */ PORTCR(132, 0xe6052084), /* PORT132CR */ PORTCR(133, 0xe6052085), /* PORT133CR */ PORTCR(134, 0xe6052086), /* PORT134CR */ PORTCR(135, 0xe6052087), /* PORT135CR */ PORTCR(136, 0xe6052088), /* PORT136CR */ PORTCR(137, 0xe6052089), /* PORT137CR */ PORTCR(138, 0xe605208a), /* PORT138CR */ PORTCR(139, 0xe605208b), /* PORT139CR */ PORTCR(140, 0xe605208c), /* PORT140CR */ PORTCR(141, 0xe605208d), /* PORT141CR */ PORTCR(142, 0xe605208e), /* PORT142CR */ PORTCR(143, 0xe605208f), /* PORT143CR */ PORTCR(144, 0xe6052090), /* PORT144CR */ PORTCR(145, 0xe6052091), /* PORT145CR */ PORTCR(146, 0xe6052092), /* PORT146CR */ PORTCR(147, 0xe6052093), /* PORT147CR */ PORTCR(148, 0xe6052094), /* PORT148CR */ PORTCR(149, 0xe6052095), /* PORT149CR */ PORTCR(150, 0xe6052096), /* PORT150CR */ PORTCR(151, 0xe6052097), /* PORT151CR */ PORTCR(152, 0xe6052098), /* PORT152CR */ PORTCR(153, 0xe6052099), /* PORT153CR */ PORTCR(154, 0xe605209a), /* PORT154CR */ PORTCR(155, 0xe605209b), /* PORT155CR */ PORTCR(156, 0xe605209c), /* PORT156CR */ PORTCR(157, 0xe605209d), /* PORT157CR */ PORTCR(158, 0xe605209e), /* PORT158CR */ PORTCR(159, 0xe605209f), /* PORT159CR */ PORTCR(160, 0xe60520a0), /* PORT160CR */ PORTCR(161, 0xe60520a1), /* PORT161CR */ PORTCR(162, 0xe60520a2), /* PORT162CR */ PORTCR(163, 0xe60520a3), /* PORT163CR */ PORTCR(164, 0xe60520a4), /* PORT164CR */ PORTCR(192, 0xe60520c0), /* PORT192CR */ PORTCR(193, 0xe60520c1), /* PORT193CR */ PORTCR(194, 0xe60520c2), /* PORT194CR */ PORTCR(195, 0xe60520c3), /* PORT195CR */ PORTCR(196, 0xe60520c4), /* PORT196CR */ PORTCR(197, 0xe60520c5), /* PORT197CR */ PORTCR(198, 0xe60520c6), /* PORT198CR */ PORTCR(199, 0xe60520c7), /* PORT199CR */ PORTCR(200, 
0xe60520c8), /* PORT200CR */ PORTCR(201, 0xe60520c9), /* PORT201CR */ PORTCR(202, 0xe60520ca), /* PORT202CR */ PORTCR(203, 0xe60520cb), /* PORT203CR */ PORTCR(204, 0xe60520cc), /* PORT204CR */ PORTCR(205, 0xe60520cd), /* PORT205CR */ PORTCR(206, 0xe60520ce), /* PORT206CR */ PORTCR(207, 0xe60520cf), /* PORT207CR */ PORTCR(208, 0xe60520d0), /* PORT208CR */ PORTCR(209, 0xe60520d1), /* PORT209CR */ PORTCR(210, 0xe60520d2), /* PORT210CR */ PORTCR(211, 0xe60520d3), /* PORT211CR */ PORTCR(212, 0xe60520d4), /* PORT212CR */ PORTCR(213, 0xe60520d5), /* PORT213CR */ PORTCR(214, 0xe60520d6), /* PORT214CR */ PORTCR(215, 0xe60520d7), /* PORT215CR */ PORTCR(216, 0xe60520d8), /* PORT216CR */ PORTCR(217, 0xe60520d9), /* PORT217CR */ PORTCR(218, 0xe60520da), /* PORT218CR */ PORTCR(219, 0xe60520db), /* PORT219CR */ PORTCR(220, 0xe60520dc), /* PORT220CR */ PORTCR(221, 0xe60520dd), /* PORT221CR */ PORTCR(222, 0xe60520de), /* PORT222CR */ PORTCR(223, 0xe60520df), /* PORT223CR */ PORTCR(224, 0xe60530e0), /* PORT224CR */ PORTCR(225, 0xe60530e1), /* PORT225CR */ PORTCR(226, 0xe60530e2), /* PORT226CR */ PORTCR(227, 0xe60530e3), /* PORT227CR */ PORTCR(228, 0xe60530e4), /* PORT228CR */ PORTCR(229, 0xe60530e5), /* PORT229CR */ PORTCR(230, 0xe60530e6), /* PORT230CR */ PORTCR(231, 0xe60530e7), /* PORT231CR */ PORTCR(232, 0xe60530e8), /* PORT232CR */ PORTCR(233, 0xe60530e9), /* PORT233CR */ PORTCR(234, 0xe60530ea), /* PORT234CR */ PORTCR(235, 0xe60530eb), /* PORT235CR */ PORTCR(236, 0xe60530ec), /* PORT236CR */ PORTCR(237, 0xe60530ed), /* PORT237CR */ PORTCR(238, 0xe60530ee), /* PORT238CR */ PORTCR(239, 0xe60530ef), /* PORT239CR */ PORTCR(240, 0xe60530f0), /* PORT240CR */ PORTCR(241, 0xe60530f1), /* PORT241CR */ PORTCR(242, 0xe60530f2), /* PORT242CR */ PORTCR(243, 0xe60530f3), /* PORT243CR */ PORTCR(244, 0xe60530f4), /* PORT244CR */ PORTCR(245, 0xe60530f5), /* PORT245CR */ PORTCR(246, 0xe60530f6), /* PORT246CR */ PORTCR(247, 0xe60530f7), /* PORT247CR */ PORTCR(248, 0xe60530f8), /* PORT248CR */ 
PORTCR(249, 0xe60530f9), /* PORT249CR */ PORTCR(250, 0xe60530fa), /* PORT250CR */ PORTCR(251, 0xe60530fb), /* PORT251CR */ PORTCR(252, 0xe60530fc), /* PORT252CR */ PORTCR(253, 0xe60530fd), /* PORT253CR */ PORTCR(254, 0xe60530fe), /* PORT254CR */ PORTCR(255, 0xe60530ff), /* PORT255CR */ PORTCR(256, 0xe6053100), /* PORT256CR */ PORTCR(257, 0xe6053101), /* PORT257CR */ PORTCR(258, 0xe6053102), /* PORT258CR */ PORTCR(259, 0xe6053103), /* PORT259CR */ PORTCR(260, 0xe6053104), /* PORT260CR */ PORTCR(261, 0xe6053105), /* PORT261CR */ PORTCR(262, 0xe6053106), /* PORT262CR */ PORTCR(263, 0xe6053107), /* PORT263CR */ PORTCR(264, 0xe6053108), /* PORT264CR */ PORTCR(265, 0xe6053109), /* PORT265CR */ PORTCR(266, 0xe605310a), /* PORT266CR */ PORTCR(267, 0xe605310b), /* PORT267CR */ PORTCR(268, 0xe605310c), /* PORT268CR */ PORTCR(269, 0xe605310d), /* PORT269CR */ PORTCR(270, 0xe605310e), /* PORT270CR */ PORTCR(271, 0xe605310f), /* PORT271CR */ PORTCR(272, 0xe6053110), /* PORT272CR */ PORTCR(273, 0xe6053111), /* PORT273CR */ PORTCR(274, 0xe6053112), /* PORT274CR */ PORTCR(275, 0xe6053113), /* PORT275CR */ PORTCR(276, 0xe6053114), /* PORT276CR */ PORTCR(277, 0xe6053115), /* PORT277CR */ PORTCR(278, 0xe6053116), /* PORT278CR */ PORTCR(279, 0xe6053117), /* PORT279CR */ PORTCR(280, 0xe6053118), /* PORT280CR */ PORTCR(281, 0xe6053119), /* PORT281CR */ PORTCR(282, 0xe605311a), /* PORT282CR */ PORTCR(288, 0xe6052120), /* PORT288CR */ PORTCR(289, 0xe6052121), /* PORT289CR */ PORTCR(290, 0xe6052122), /* PORT290CR */ PORTCR(291, 0xe6052123), /* PORT291CR */ PORTCR(292, 0xe6052124), /* PORT292CR */ PORTCR(293, 0xe6052125), /* PORT293CR */ PORTCR(294, 0xe6052126), /* PORT294CR */ PORTCR(295, 0xe6052127), /* PORT295CR */ PORTCR(296, 0xe6052128), /* PORT296CR */ PORTCR(297, 0xe6052129), /* PORT297CR */ PORTCR(298, 0xe605212a), /* PORT298CR */ PORTCR(299, 0xe605212b), /* PORT299CR */ PORTCR(300, 0xe605212c), /* PORT300CR */ PORTCR(301, 0xe605212d), /* PORT301CR */ PORTCR(302, 0xe605212e), /* 
PORT302CR */ PORTCR(303, 0xe605212f), /* PORT303CR */ PORTCR(304, 0xe6052130), /* PORT304CR */ PORTCR(305, 0xe6052131), /* PORT305CR */ PORTCR(306, 0xe6052132), /* PORT306CR */ PORTCR(307, 0xe6052133), /* PORT307CR */ PORTCR(308, 0xe6052134), /* PORT308CR */ PORTCR(309, 0xe6052135), /* PORT309CR */ { PINMUX_CFG_REG("MSEL2CR", 0xe605801c, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1, MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1, MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1, MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1, 0, 0, MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1, MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1, MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1, MSEL2CR_MSEL11_0, MSEL2CR_MSEL11_1, MSEL2CR_MSEL10_0, MSEL2CR_MSEL10_1, MSEL2CR_MSEL9_0, MSEL2CR_MSEL9_1, MSEL2CR_MSEL8_0, MSEL2CR_MSEL8_1, MSEL2CR_MSEL7_0, MSEL2CR_MSEL7_1, MSEL2CR_MSEL6_0, MSEL2CR_MSEL6_1, MSEL2CR_MSEL5_0, MSEL2CR_MSEL5_1, MSEL2CR_MSEL4_0, MSEL2CR_MSEL4_1, MSEL2CR_MSEL3_0, MSEL2CR_MSEL3_1, MSEL2CR_MSEL2_0, MSEL2CR_MSEL2_1, MSEL2CR_MSEL1_0, MSEL2CR_MSEL1_1, MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1, } }, { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) { 0, 0, 0, 0, 0, 0, MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1, 0, 0, 0, 0, 0, 0, MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1, 0, 0, MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1, 0, 0, 0, 0, MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1, 0, 0, 0, 0, 0, 0, MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) { 0, 0, 0, 0, MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1, 0, 0, MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1, 0, 0, 0, 0, 0, 0, MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1, MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1, MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1, MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1, 0, 0, 0, 0, 0, 0, MSEL4CR_MSEL15_0, MSEL4CR_MSEL15_1, 0, 0, MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1, MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1, 
MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1, MSEL4CR_MSEL10_0, MSEL4CR_MSEL10_1, MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1, MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1, MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1, 0, 0, 0, 0, MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1, 0, 0, 0, 0, MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1, 0, 0, } }, { }, }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) { PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA, PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA, PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA, PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA, PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA, PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA, PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA, PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA } }, { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32) { PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA, PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA, PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA, PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA, PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA, PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA, PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA, PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA } }, { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055004, 32) { PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA, PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA, PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA, PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA, PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA, PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA, PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA, PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA } }, { PINMUX_DATA_REG("PORTR127_096DR", 0xe6056000, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT118_DATA, PORT117_DATA, PORT116_DATA, PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA, PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA, PORT107_DATA, 
PORT106_DATA, PORT105_DATA, PORT104_DATA, PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA, PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA } }, { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056004, 32) { PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA, PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA, PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA, PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA, PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA, PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA, PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA, PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA } }, { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056008, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT164_DATA, PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA } }, { PINMUX_DATA_REG("PORTR223_192DR", 0xe605600C, 32) { PORT223_DATA, PORT222_DATA, PORT221_DATA, PORT220_DATA, PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA, PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA, PORT211_DATA, PORT210_DATA, PORT209_DATA, PORT208_DATA, PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA, PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA, PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA, PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA } }, { PINMUX_DATA_REG("PORTU255_224DR", 0xe6057000, 32) { PORT255_DATA, PORT254_DATA, PORT253_DATA, PORT252_DATA, PORT251_DATA, PORT250_DATA, PORT249_DATA, PORT248_DATA, PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA, PORT243_DATA, PORT242_DATA, PORT241_DATA, PORT240_DATA, PORT239_DATA, PORT238_DATA, PORT237_DATA, PORT236_DATA, PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA, PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA, PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA } }, { PINMUX_DATA_REG("PORTU287_256DR", 0xe6057004, 32) { 0, 0, 0, 0, 0, PORT282_DATA, PORT281_DATA, PORT280_DATA, 
PORT279_DATA, PORT278_DATA, PORT277_DATA, PORT276_DATA, PORT275_DATA, PORT274_DATA, PORT273_DATA, PORT272_DATA, PORT271_DATA, PORT270_DATA, PORT269_DATA, PORT268_DATA, PORT267_DATA, PORT266_DATA, PORT265_DATA, PORT264_DATA, PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA, PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA } }, { PINMUX_DATA_REG("PORTR319_288DR", 0xe6056010, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT309_DATA, PORT308_DATA, PORT307_DATA, PORT306_DATA, PORT305_DATA, PORT304_DATA, PORT303_DATA, PORT302_DATA, PORT301_DATA, PORT300_DATA, PORT299_DATA, PORT298_DATA, PORT297_DATA, PORT296_DATA, PORT295_DATA, PORT294_DATA, PORT293_DATA, PORT292_DATA, PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA } }, { }, }; static struct pinmux_info sh73a0_pinmux_info = { .name = "sh73a0_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PORT0, .last_gpio = GPIO_FN_FSIAISLD_PU, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; void sh73a0_pinmux_init(void) { register_pinmux(&sh73a0_pinmux_info); }
gpl-2.0
zephiK/android_kernel_moto_shamu_fk
drivers/media/pci/saa7164/saa7164-api.c
2945
44815
/* * Driver for the NXP SAA7164 PCIe bridge * * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/wait.h> #include <linux/slab.h> #include "saa7164.h" int saa7164_api_get_load_info(struct saa7164_dev *dev, struct tmFwInfoStruct *i) { int ret; if (!(saa_debug & DBGLVL_CPU)) return 0; dprintk(DBGLVL_API, "%s()\n", __func__); i->deviceinst = 0; i->devicespec = 0; i->mode = 0; i->status = 0; ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_FW_STATUS_CONTROL, sizeof(struct tmFwInfoStruct), i); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); printk(KERN_INFO "saa7164[%d]-CPU: %d percent", dev->nr, i->CPULoad); return ret; } int saa7164_api_collect_debug(struct saa7164_dev *dev) { struct tmComResDebugGetData d; u8 more = 255; int ret; dprintk(DBGLVL_API, "%s()\n", __func__); while (more--) { memset(&d, 0, sizeof(d)); ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_DEBUG_DATA_CONTROL, sizeof(d), &d); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); if (d.dwResult != SAA_OK) break; printk(KERN_INFO "saa7164[%d]-FWMSG: %s", dev->nr, d.ucDebugData); } return 0; } int saa7164_api_set_debug(struct saa7164_dev *dev, u8 level) { struct tmComResDebugSetLevel lvl; int ret; dprintk(DBGLVL_API, "%s(level=%d)\n", __func__, level); /* 
Retrieve current state */ ret = saa7164_cmd_send(dev, 0, GET_CUR, SET_DEBUG_LEVEL_CONTROL, sizeof(lvl), &lvl); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_API, "%s() Was %d\n", __func__, lvl.dwDebugLevel); lvl.dwDebugLevel = level; /* set new state */ ret = saa7164_cmd_send(dev, 0, SET_CUR, SET_DEBUG_LEVEL_CONTROL, sizeof(lvl), &lvl); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_set_vbi_format(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResProbeCommit fmt, rsp; int ret; dprintk(DBGLVL_API, "%s(nr=%d, unitid=0x%x)\n", __func__, port->nr, port->hwcfg.unitid); fmt.bmHint = 0; fmt.bFormatIndex = 1; fmt.bFrameIndex = 1; /* Probe, see if it can support this format */ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR, SAA_PROBE_CONTROL, sizeof(fmt), &fmt); if (ret != SAA_OK) printk(KERN_ERR "%s() set error, ret = 0x%x\n", __func__, ret); /* See of the format change was successful */ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, GET_CUR, SAA_PROBE_CONTROL, sizeof(rsp), &rsp); if (ret != SAA_OK) { printk(KERN_ERR "%s() get error, ret = 0x%x\n", __func__, ret); } else { /* Compare requested vs received, should be same */ if (memcmp(&fmt, &rsp, sizeof(rsp)) == 0) { dprintk(DBGLVL_API, "SET/PROBE Verified\n"); /* Ask the device to select the negotiated format */ ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR, SAA_COMMIT_CONTROL, sizeof(fmt), &fmt); if (ret != SAA_OK) printk(KERN_ERR "%s() commit error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, GET_CUR, SAA_COMMIT_CONTROL, sizeof(rsp), &rsp); if (ret != SAA_OK) printk(KERN_ERR "%s() GET commit error, ret = 0x%x\n", __func__, ret); if (memcmp(&fmt, &rsp, sizeof(rsp)) != 0) { printk(KERN_ERR "%s() memcmp error, ret = 0x%x\n", __func__, ret); } else dprintk(DBGLVL_API, "SET/COMMIT Verified\n"); 
dprintk(DBGLVL_API, "rsp.bmHint = 0x%x\n", rsp.bmHint); dprintk(DBGLVL_API, "rsp.bFormatIndex = 0x%x\n", rsp.bFormatIndex); dprintk(DBGLVL_API, "rsp.bFrameIndex = 0x%x\n", rsp.bFrameIndex); } else printk(KERN_ERR "%s() compare failed\n", __func__); } if (ret == SAA_OK) dprintk(DBGLVL_API, "%s(nr=%d) Success\n", __func__, port->nr); return ret; } static int saa7164_api_set_gop_size(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResEncVideoGopStructure gs; int ret; dprintk(DBGLVL_ENC, "%s()\n", __func__); gs.ucRefFrameDist = port->encoder_params.refdist; gs.ucGOPSize = port->encoder_params.gop_size; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_VIDEO_GOP_STRUCTURE_CONTROL, sizeof(gs), &gs); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_set_encoder(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResEncVideoBitRate vb; struct tmComResEncAudioBitRate ab; int ret; dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__, port->hwcfg.sourceid); if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) port->encoder_profile = EU_PROFILE_PS_DVD; else port->encoder_profile = EU_PROFILE_TS_HQ; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Resolution */ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Establish video bitrates */ if (port->encoder_params.bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_CONSTANT; else vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_VARIABLE_PEAK; vb.dwVideoBitRate = port->encoder_params.bitrate; vb.dwVideoBitRatePeak = 
port->encoder_params.bitrate_peak; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_VIDEO_BIT_RATE_CONTROL, sizeof(struct tmComResEncVideoBitRate), &vb); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Establish audio bitrates */ ab.ucAudioBitRateMode = 0; ab.dwAudioBitRate = 384000; ab.dwAudioBitRatePeak = ab.dwAudioBitRate; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_AUDIO_BIT_RATE_CONTROL, sizeof(struct tmComResEncAudioBitRate), &ab); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); saa7164_api_set_aspect_ratio(port); saa7164_api_set_gop_size(port); return ret; } int saa7164_api_get_encoder(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResEncVideoBitRate v; struct tmComResEncAudioBitRate a; struct tmComResEncVideoInputAspectRatio ar; int ret; dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__, port->hwcfg.sourceid); port->encoder_profile = 0; port->video_format = 0; port->video_resolution = 0; port->audio_format = 0; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_VIDEO_RESOLUTION_CONTROL, sizeof(u8), &port->video_resolution); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_VIDEO_FORMAT_CONTROL, sizeof(u8), &port->video_format); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_VIDEO_BIT_RATE_CONTROL, sizeof(v), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_AUDIO_FORMAT_CONTROL, sizeof(u8), &port->audio_format); 
if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_AUDIO_BIT_RATE_CONTROL, sizeof(a), &a); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Aspect Ratio */ ar.width = 0; ar.height = 0; ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR, EU_VIDEO_INPUT_ASPECT_CONTROL, sizeof(struct tmComResEncVideoInputAspectRatio), &ar); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_ENC, "encoder_profile = %d\n", port->encoder_profile); dprintk(DBGLVL_ENC, "video_format = %d\n", port->video_format); dprintk(DBGLVL_ENC, "audio_format = %d\n", port->audio_format); dprintk(DBGLVL_ENC, "video_resolution= %d\n", port->video_resolution); dprintk(DBGLVL_ENC, "v.ucVideoBitRateMode = %d\n", v.ucVideoBitRateMode); dprintk(DBGLVL_ENC, "v.dwVideoBitRate = %d\n", v.dwVideoBitRate); dprintk(DBGLVL_ENC, "v.dwVideoBitRatePeak = %d\n", v.dwVideoBitRatePeak); dprintk(DBGLVL_ENC, "a.ucVideoBitRateMode = %d\n", a.ucAudioBitRateMode); dprintk(DBGLVL_ENC, "a.dwVideoBitRate = %d\n", a.dwAudioBitRate); dprintk(DBGLVL_ENC, "a.dwVideoBitRatePeak = %d\n", a.dwAudioBitRatePeak); dprintk(DBGLVL_ENC, "aspect.width / height = %d:%d\n", ar.width, ar.height); return ret; } int saa7164_api_set_aspect_ratio(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResEncVideoInputAspectRatio ar; int ret; dprintk(DBGLVL_ENC, "%s(%d)\n", __func__, port->encoder_params.ctl_aspect); switch (port->encoder_params.ctl_aspect) { case V4L2_MPEG_VIDEO_ASPECT_1x1: ar.width = 1; ar.height = 1; break; case V4L2_MPEG_VIDEO_ASPECT_4x3: ar.width = 4; ar.height = 3; break; case V4L2_MPEG_VIDEO_ASPECT_16x9: ar.width = 16; ar.height = 9; break; case V4L2_MPEG_VIDEO_ASPECT_221x100: ar.width = 221; ar.height = 100; break; default: BUG(); } dprintk(DBGLVL_ENC, "%s(%d) now %d:%d\n", __func__, port->encoder_params.ctl_aspect, 
ar.width, ar.height); /* Aspect Ratio */ ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR, EU_VIDEO_INPUT_ASPECT_CONTROL, sizeof(struct tmComResEncVideoInputAspectRatio), &ar); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_set_usercontrol(struct saa7164_port *port, u8 ctl) { struct saa7164_dev *dev = port->dev; int ret; u16 val; if (ctl == PU_BRIGHTNESS_CONTROL) val = port->ctl_brightness; else if (ctl == PU_CONTRAST_CONTROL) val = port->ctl_contrast; else if (ctl == PU_HUE_CONTROL) val = port->ctl_hue; else if (ctl == PU_SATURATION_CONTROL) val = port->ctl_saturation; else if (ctl == PU_SHARPNESS_CONTROL) val = port->ctl_sharpness; else return -EINVAL; dprintk(DBGLVL_ENC, "%s() unitid=0x%x ctl=%d, val=%d\n", __func__, port->encunit.vsourceid, ctl, val); ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, SET_CUR, ctl, sizeof(u16), &val); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_get_usercontrol(struct saa7164_port *port, u8 ctl) { struct saa7164_dev *dev = port->dev; int ret; u16 val; ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, GET_CUR, ctl, sizeof(u16), &val); if (ret != SAA_OK) { printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } dprintk(DBGLVL_ENC, "%s() ctl=%d, val=%d\n", __func__, ctl, val); if (ctl == PU_BRIGHTNESS_CONTROL) port->ctl_brightness = val; else if (ctl == PU_CONTRAST_CONTROL) port->ctl_contrast = val; else if (ctl == PU_HUE_CONTROL) port->ctl_hue = val; else if (ctl == PU_SATURATION_CONTROL) port->ctl_saturation = val; else if (ctl == PU_SHARPNESS_CONTROL) port->ctl_sharpness = val; return ret; } int saa7164_api_set_videomux(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; u8 inputs[] = { 1, 2, 2, 2, 5, 5, 5 }; int ret; dprintk(DBGLVL_ENC, "%s() v_mux=%d a_mux=%d\n", __func__, port->mux_input, inputs[port->mux_input - 1]); /* Audio Mute */ ret 
= saa7164_api_audio_mute(port, 1); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Video Mux */ ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, SET_CUR, SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Audio Mux */ ret = saa7164_cmd_send(port->dev, port->audfeat.sourceid, SET_CUR, SU_INPUT_SELECT_CONTROL, sizeof(u8), &inputs[port->mux_input - 1]); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Audio UnMute */ ret = saa7164_api_audio_mute(port, 0); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_audio_mute(struct saa7164_port *port, int mute) { struct saa7164_dev *dev = port->dev; u8 v = mute; int ret; dprintk(DBGLVL_API, "%s(%d)\n", __func__, mute); ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR, MUTE_CONTROL, sizeof(u8), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } /* 0 = silence, 0xff = full */ int saa7164_api_set_audio_volume(struct saa7164_port *port, s8 level) { struct saa7164_dev *dev = port->dev; s16 v, min, max; int ret; dprintk(DBGLVL_API, "%s(%d)\n", __func__, level); /* Obtain the min/max ranges */ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MIN, VOLUME_CONTROL, sizeof(u16), &min); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MAX, VOLUME_CONTROL, sizeof(u16), &max); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR, (0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__, level, min, max, v); v = level; if (v < min) v = min; if (v 
> max) v = max; /* Left */ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR, (0x01 << 8) | VOLUME_CONTROL, sizeof(s16), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Right */ ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR, (0x02 << 8) | VOLUME_CONTROL, sizeof(s16), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR, (0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__, level, min, max, v); return ret; } int saa7164_api_set_audio_std(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct tmComResAudioDefaults lvl; struct tmComResTunerStandard tvaudio; int ret; dprintk(DBGLVL_API, "%s()\n", __func__); /* Establish default levels */ lvl.ucDecoderLevel = TMHW_LEV_ADJ_DECLEV_DEFAULT; lvl.ucDecoderFM_Level = TMHW_LEV_ADJ_DECLEV_DEFAULT; lvl.ucMonoLevel = TMHW_LEV_ADJ_MONOLEV_DEFAULT; lvl.ucNICAM_Level = TMHW_LEV_ADJ_NICLEV_DEFAULT; lvl.ucSAP_Level = TMHW_LEV_ADJ_SAPLEV_DEFAULT; lvl.ucADC_Level = TMHW_LEV_ADJ_ADCLEV_DEFAULT; ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR, AUDIO_DEFAULT_CONTROL, sizeof(struct tmComResAudioDefaults), &lvl); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); /* Manually select the appropriate TV audio standard */ if (port->encodernorm.id & V4L2_STD_NTSC) { tvaudio.std = TU_STANDARD_NTSC_M; tvaudio.country = 1; } else { tvaudio.std = TU_STANDARD_PAL_I; tvaudio.country = 44; } ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR, TU_STANDARD_CONTROL, sizeof(tvaudio), &tvaudio); if (ret != SAA_OK) printk(KERN_ERR "%s() TU_STANDARD_CONTROL error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_set_audio_detection(struct saa7164_port *port, int autodetect) { 
struct saa7164_dev *dev = port->dev; struct tmComResTunerStandardAuto p; int ret; dprintk(DBGLVL_API, "%s(%d)\n", __func__, autodetect); /* Disable TV Audio autodetect if not already set (buggy) */ if (autodetect) p.mode = TU_STANDARD_AUTO; else p.mode = TU_STANDARD_MANUAL; ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR, TU_STANDARD_AUTO_CONTROL, sizeof(p), &p); if (ret != SAA_OK) printk(KERN_ERR "%s() TU_STANDARD_AUTO_CONTROL error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_get_videomux(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; int ret; ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, GET_CUR, SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_ENC, "%s() v_mux=%d\n", __func__, port->mux_input); return ret; } static int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val) { struct saa7164_dev *dev = port->dev; u16 len = 0; u8 buf[256]; int ret; u8 mas; dprintk(DBGLVL_API, "%s(nr=%d type=%d val=%x)\n", __func__, port->nr, port->type, val); if (port->nr == 0) mas = 0xd0; else mas = 0xe0; memset(buf, 0, sizeof(buf)); buf[0x00] = 0x04; buf[0x01] = 0x00; buf[0x02] = 0x00; buf[0x03] = 0x00; buf[0x04] = 0x04; buf[0x05] = 0x00; buf[0x06] = 0x00; buf[0x07] = 0x00; buf[0x08] = reg; buf[0x09] = 0x26; buf[0x0a] = mas; buf[0x0b] = 0xb0; buf[0x0c] = val; buf[0x0d] = 0x00; buf[0x0e] = 0x00; buf[0x0f] = 0x00; ret = saa7164_cmd_send(dev, port->ifunit.unitid, GET_LEN, EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len); if (ret != SAA_OK) { printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret); return -EIO; } ret = saa7164_cmd_send(dev, port->ifunit.unitid, SET_CUR, EXU_REGISTER_ACCESS_CONTROL, len, &buf); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret); #if 0 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, buf, 16, false); #endif return ret == SAA_OK ? 
0 : -EIO; } /* Disable the IF block AGC controls */ int saa7164_api_configure_dif(struct saa7164_port *port, u32 std) { struct saa7164_dev *dev = port->dev; int ret = 0; u8 agc_disable; dprintk(DBGLVL_API, "%s(nr=%d, 0x%x)\n", __func__, port->nr, std); if (std & V4L2_STD_NTSC) { dprintk(DBGLVL_API, " NTSC\n"); saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */ agc_disable = 0; } else if (std & V4L2_STD_PAL_I) { dprintk(DBGLVL_API, " PAL-I\n"); saa7164_api_set_dif(port, 0x00, 0x08); /* Video Standard */ agc_disable = 0; } else if (std & V4L2_STD_PAL_M) { dprintk(DBGLVL_API, " PAL-M\n"); saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */ agc_disable = 0; } else if (std & V4L2_STD_PAL_N) { dprintk(DBGLVL_API, " PAL-N\n"); saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */ agc_disable = 0; } else if (std & V4L2_STD_PAL_Nc) { dprintk(DBGLVL_API, " PAL-Nc\n"); saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */ agc_disable = 0; } else if (std & V4L2_STD_PAL_B) { dprintk(DBGLVL_API, " PAL-B\n"); saa7164_api_set_dif(port, 0x00, 0x02); /* Video Standard */ agc_disable = 0; } else if (std & V4L2_STD_PAL_DK) { dprintk(DBGLVL_API, " PAL-DK\n"); saa7164_api_set_dif(port, 0x00, 0x10); /* Video Standard */ agc_disable = 0; } else if (std & V4L2_STD_SECAM_L) { dprintk(DBGLVL_API, " SECAM-L\n"); saa7164_api_set_dif(port, 0x00, 0x20); /* Video Standard */ agc_disable = 0; } else { /* Unknown standard, assume DTV */ dprintk(DBGLVL_API, " Unknown (assuming DTV)\n"); /* Undefinded Video Standard */ saa7164_api_set_dif(port, 0x00, 0x80); agc_disable = 1; } saa7164_api_set_dif(port, 0x48, 0xa0); /* AGC Functions 1 */ saa7164_api_set_dif(port, 0xc0, agc_disable); /* AGC Output Disable */ saa7164_api_set_dif(port, 0x7c, 0x04); /* CVBS EQ */ saa7164_api_set_dif(port, 0x04, 0x01); /* Active */ msleep(100); saa7164_api_set_dif(port, 0x04, 0x00); /* Active (again) */ msleep(100); return ret; } /* Ensure the dif is in the correct state for the operating mode * 
(analog / dtv). We only configure the diff through the analog encoder * so when we're in digital mode we need to find the appropriate encoder * and use it to configure the DIF. */ int saa7164_api_initialize_dif(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_port *p = NULL; int ret = -EINVAL; u32 std = 0; dprintk(DBGLVL_API, "%s(nr=%d type=%d)\n", __func__, port->nr, port->type); if (port->type == SAA7164_MPEG_ENCODER) { /* Pick any analog standard to init the diff. * we'll come back during encoder_init' * and set the correct standard if requried. */ std = V4L2_STD_NTSC; } else if (port->type == SAA7164_MPEG_DVB) { if (port->nr == SAA7164_PORT_TS1) p = &dev->ports[SAA7164_PORT_ENC1]; else p = &dev->ports[SAA7164_PORT_ENC2]; } else if (port->type == SAA7164_MPEG_VBI) { std = V4L2_STD_NTSC; if (port->nr == SAA7164_PORT_VBI1) p = &dev->ports[SAA7164_PORT_ENC1]; else p = &dev->ports[SAA7164_PORT_ENC2]; } else BUG(); if (p) ret = saa7164_api_configure_dif(p, std); return ret; } int saa7164_api_transition_port(struct saa7164_port *port, u8 mode) { struct saa7164_dev *dev = port->dev; int ret; dprintk(DBGLVL_API, "%s(nr=%d unitid=0x%x,%d)\n", __func__, port->nr, port->hwcfg.unitid, mode); ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR, SAA_STATE_CONTROL, sizeof(mode), &mode); if (ret != SAA_OK) printk(KERN_ERR "%s(portnr %d unitid 0x%x) error, ret = 0x%x\n", __func__, port->nr, port->hwcfg.unitid, ret); return ret; } int saa7164_api_get_fw_version(struct saa7164_dev *dev, u32 *version) { int ret; ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_FW_VERSION_CONTROL, sizeof(u32), version); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen) { u8 reg[] = { 0x0f, 0x00 }; if (buflen < 128) return -ENOMEM; /* Assumption: Hauppauge eeprom is at 0xa0 on on bus 0 */ /* TODO: Pull the details from the boards struct */ return 
saa7164_api_i2c_read(&dev->i2c_bus[0], 0xa0 >> 1, sizeof(reg), &reg[0], 128, buf); } static int saa7164_api_configure_port_vbi(struct saa7164_dev *dev, struct saa7164_port *port) { struct tmComResVBIFormatDescrHeader *fmt = &port->vbi_fmt_ntsc; dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex); dprintk(DBGLVL_API, " VideoStandard = 0x%x\n", fmt->VideoStandard); dprintk(DBGLVL_API, " StartLine = %d\n", fmt->StartLine); dprintk(DBGLVL_API, " EndLine = %d\n", fmt->EndLine); dprintk(DBGLVL_API, " FieldRate = %d\n", fmt->FieldRate); dprintk(DBGLVL_API, " bNumLines = %d\n", fmt->bNumLines); /* Cache the hardware configuration in the port */ port->bufcounter = port->hwcfg.BARLocation; port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32)); port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32)); port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32)); port->bufptr32l = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32); port->bufptr32h = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); port->bufptr64 = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n", port->hwcfg.BARLocation); dprintk(DBGLVL_API, " = VS_FORMAT_VBI (becomes dev->en[%d])\n", port->nr); return 0; } static int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev, struct saa7164_port *port, struct tmComResTSFormatDescrHeader *tsfmt) { dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", tsfmt->bFormatIndex); dprintk(DBGLVL_API, " bDataOffset = 0x%x\n", tsfmt->bDataOffset); dprintk(DBGLVL_API, " bPacketLength= 0x%x\n", tsfmt->bPacketLength); dprintk(DBGLVL_API, " bStrideLength= 0x%x\n", tsfmt->bStrideLength); dprintk(DBGLVL_API, " bguid = (....)\n"); /* Cache the hardware configuration in the port */ port->bufcounter = port->hwcfg.BARLocation; port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32)); 
port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32)); port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32)); port->bufptr32l = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32); port->bufptr32h = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); port->bufptr64 = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n", port->hwcfg.BARLocation); dprintk(DBGLVL_API, " = VS_FORMAT_MPEGTS (becomes dev->ts[%d])\n", port->nr); return 0; } static int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev, struct saa7164_port *port, struct tmComResPSFormatDescrHeader *fmt) { dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex); dprintk(DBGLVL_API, " wPacketLength= 0x%x\n", fmt->wPacketLength); dprintk(DBGLVL_API, " wPackLength= 0x%x\n", fmt->wPackLength); dprintk(DBGLVL_API, " bPackDataType= 0x%x\n", fmt->bPackDataType); /* Cache the hardware configuration in the port */ /* TODO: CHECK THIS in the port config */ port->bufcounter = port->hwcfg.BARLocation; port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32)); port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32)); port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32)); port->bufptr32l = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32); port->bufptr32h = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); port->bufptr64 = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n", port->hwcfg.BARLocation); dprintk(DBGLVL_API, " = VS_FORMAT_MPEGPS (becomes dev->enc[%d])\n", port->nr); return 0; } static int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len) { struct saa7164_port *tsport = NULL; struct saa7164_port 
*encport = NULL; struct saa7164_port *vbiport = NULL; u32 idx, next_offset; int i; struct tmComResDescrHeader *hdr, *t; struct tmComResExtDevDescrHeader *exthdr; struct tmComResPathDescrHeader *pathhdr; struct tmComResAntTermDescrHeader *anttermhdr; struct tmComResTunerDescrHeader *tunerunithdr; struct tmComResDMATermDescrHeader *vcoutputtermhdr; struct tmComResTSFormatDescrHeader *tsfmt; struct tmComResPSFormatDescrHeader *psfmt; struct tmComResSelDescrHeader *psel; struct tmComResProcDescrHeader *pdh; struct tmComResAFeatureDescrHeader *afd; struct tmComResEncoderDescrHeader *edh; struct tmComResVBIFormatDescrHeader *vbifmt; u32 currpath = 0; dprintk(DBGLVL_API, "%s(?,?,%d) sizeof(struct tmComResDescrHeader) = %d bytes\n", __func__, len, (u32)sizeof(struct tmComResDescrHeader)); for (idx = 0; idx < (len - sizeof(struct tmComResDescrHeader));) { hdr = (struct tmComResDescrHeader *)(buf + idx); if (hdr->type != CS_INTERFACE) return SAA_ERR_NOT_SUPPORTED; dprintk(DBGLVL_API, "@ 0x%x =\n", idx); switch (hdr->subtype) { case GENERAL_REQUEST: dprintk(DBGLVL_API, " GENERAL_REQUEST\n"); break; case VC_TUNER_PATH: dprintk(DBGLVL_API, " VC_TUNER_PATH\n"); pathhdr = (struct tmComResPathDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " pathid = 0x%x\n", pathhdr->pathid); currpath = pathhdr->pathid; break; case VC_INPUT_TERMINAL: dprintk(DBGLVL_API, " VC_INPUT_TERMINAL\n"); anttermhdr = (struct tmComResAntTermDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " terminalid = 0x%x\n", anttermhdr->terminalid); dprintk(DBGLVL_API, " terminaltype = 0x%x\n", anttermhdr->terminaltype); switch (anttermhdr->terminaltype) { case ITT_ANTENNA: dprintk(DBGLVL_API, " = ITT_ANTENNA\n"); break; case LINE_CONNECTOR: dprintk(DBGLVL_API, " = LINE_CONNECTOR\n"); break; case SPDIF_CONNECTOR: dprintk(DBGLVL_API, " = SPDIF_CONNECTOR\n"); break; case COMPOSITE_CONNECTOR: dprintk(DBGLVL_API, " = COMPOSITE_CONNECTOR\n"); break; case SVIDEO_CONNECTOR: dprintk(DBGLVL_API, " = SVIDEO_CONNECTOR\n"); break; 
case COMPONENT_CONNECTOR: dprintk(DBGLVL_API, " = COMPONENT_CONNECTOR\n"); break; case STANDARD_DMA: dprintk(DBGLVL_API, " = STANDARD_DMA\n"); break; default: dprintk(DBGLVL_API, " = undefined (0x%x)\n", anttermhdr->terminaltype); } dprintk(DBGLVL_API, " assocterminal= 0x%x\n", anttermhdr->assocterminal); dprintk(DBGLVL_API, " iterminal = 0x%x\n", anttermhdr->iterminal); dprintk(DBGLVL_API, " controlsize = 0x%x\n", anttermhdr->controlsize); break; case VC_OUTPUT_TERMINAL: dprintk(DBGLVL_API, " VC_OUTPUT_TERMINAL\n"); vcoutputtermhdr = (struct tmComResDMATermDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " unitid = 0x%x\n", vcoutputtermhdr->unitid); dprintk(DBGLVL_API, " terminaltype = 0x%x\n", vcoutputtermhdr->terminaltype); switch (vcoutputtermhdr->terminaltype) { case ITT_ANTENNA: dprintk(DBGLVL_API, " = ITT_ANTENNA\n"); break; case LINE_CONNECTOR: dprintk(DBGLVL_API, " = LINE_CONNECTOR\n"); break; case SPDIF_CONNECTOR: dprintk(DBGLVL_API, " = SPDIF_CONNECTOR\n"); break; case COMPOSITE_CONNECTOR: dprintk(DBGLVL_API, " = COMPOSITE_CONNECTOR\n"); break; case SVIDEO_CONNECTOR: dprintk(DBGLVL_API, " = SVIDEO_CONNECTOR\n"); break; case COMPONENT_CONNECTOR: dprintk(DBGLVL_API, " = COMPONENT_CONNECTOR\n"); break; case STANDARD_DMA: dprintk(DBGLVL_API, " = STANDARD_DMA\n"); break; default: dprintk(DBGLVL_API, " = undefined (0x%x)\n", vcoutputtermhdr->terminaltype); } dprintk(DBGLVL_API, " assocterminal= 0x%x\n", vcoutputtermhdr->assocterminal); dprintk(DBGLVL_API, " sourceid = 0x%x\n", vcoutputtermhdr->sourceid); dprintk(DBGLVL_API, " iterminal = 0x%x\n", vcoutputtermhdr->iterminal); dprintk(DBGLVL_API, " BARLocation = 0x%x\n", vcoutputtermhdr->BARLocation); dprintk(DBGLVL_API, " flags = 0x%x\n", vcoutputtermhdr->flags); dprintk(DBGLVL_API, " interruptid = 0x%x\n", vcoutputtermhdr->interruptid); dprintk(DBGLVL_API, " buffercount = 0x%x\n", vcoutputtermhdr->buffercount); dprintk(DBGLVL_API, " metadatasize = 0x%x\n", vcoutputtermhdr->metadatasize); dprintk(DBGLVL_API, " 
controlsize = 0x%x\n", vcoutputtermhdr->controlsize); dprintk(DBGLVL_API, " numformats = 0x%x\n", vcoutputtermhdr->numformats); t = (struct tmComResDescrHeader *) ((struct tmComResDMATermDescrHeader *)(buf + idx)); next_offset = idx + (vcoutputtermhdr->len); for (i = 0; i < vcoutputtermhdr->numformats; i++) { t = (struct tmComResDescrHeader *) (buf + next_offset); switch (t->subtype) { case VS_FORMAT_MPEG2TS: tsfmt = (struct tmComResTSFormatDescrHeader *)t; if (currpath == 1) tsport = &dev->ports[SAA7164_PORT_TS1]; else tsport = &dev->ports[SAA7164_PORT_TS2]; memcpy(&tsport->hwcfg, vcoutputtermhdr, sizeof(*vcoutputtermhdr)); saa7164_api_configure_port_mpeg2ts(dev, tsport, tsfmt); break; case VS_FORMAT_MPEG2PS: psfmt = (struct tmComResPSFormatDescrHeader *)t; if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->hwcfg, vcoutputtermhdr, sizeof(*vcoutputtermhdr)); saa7164_api_configure_port_mpeg2ps(dev, encport, psfmt); break; case VS_FORMAT_VBI: vbifmt = (struct tmComResVBIFormatDescrHeader *)t; if (currpath == 1) vbiport = &dev->ports[SAA7164_PORT_VBI1]; else vbiport = &dev->ports[SAA7164_PORT_VBI2]; memcpy(&vbiport->hwcfg, vcoutputtermhdr, sizeof(*vcoutputtermhdr)); memcpy(&vbiport->vbi_fmt_ntsc, vbifmt, sizeof(*vbifmt)); saa7164_api_configure_port_vbi(dev, vbiport); break; case VS_FORMAT_RDS: dprintk(DBGLVL_API, " = VS_FORMAT_RDS\n"); break; case VS_FORMAT_UNCOMPRESSED: dprintk(DBGLVL_API, " = VS_FORMAT_UNCOMPRESSED\n"); break; case VS_FORMAT_TYPE: dprintk(DBGLVL_API, " = VS_FORMAT_TYPE\n"); break; default: dprintk(DBGLVL_API, " = undefined (0x%x)\n", t->subtype); } next_offset += t->len; } break; case TUNER_UNIT: dprintk(DBGLVL_API, " TUNER_UNIT\n"); tunerunithdr = (struct tmComResTunerDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " unitid = 0x%x\n", tunerunithdr->unitid); dprintk(DBGLVL_API, " sourceid = 0x%x\n", tunerunithdr->sourceid); dprintk(DBGLVL_API, " iunit = 0x%x\n", 
tunerunithdr->iunit); dprintk(DBGLVL_API, " tuningstandards = 0x%x\n", tunerunithdr->tuningstandards); dprintk(DBGLVL_API, " controlsize = 0x%x\n", tunerunithdr->controlsize); dprintk(DBGLVL_API, " controls = 0x%x\n", tunerunithdr->controls); if (tunerunithdr->unitid == tunerunithdr->iunit) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->tunerunit, tunerunithdr, sizeof(struct tmComResTunerDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d] tuner)\n", encport->nr); } break; case VC_SELECTOR_UNIT: psel = (struct tmComResSelDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " VC_SELECTOR_UNIT\n"); dprintk(DBGLVL_API, " unitid = 0x%x\n", psel->unitid); dprintk(DBGLVL_API, " nrinpins = 0x%x\n", psel->nrinpins); dprintk(DBGLVL_API, " sourceid = 0x%x\n", psel->sourceid); break; case VC_PROCESSING_UNIT: pdh = (struct tmComResProcDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " VC_PROCESSING_UNIT\n"); dprintk(DBGLVL_API, " unitid = 0x%x\n", pdh->unitid); dprintk(DBGLVL_API, " sourceid = 0x%x\n", pdh->sourceid); dprintk(DBGLVL_API, " controlsize = 0x%x\n", pdh->controlsize); if (pdh->controlsize == 0x04) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->vidproc, pdh, sizeof(struct tmComResProcDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); } break; case FEATURE_UNIT: afd = (struct tmComResAFeatureDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " FEATURE_UNIT\n"); dprintk(DBGLVL_API, " unitid = 0x%x\n", afd->unitid); dprintk(DBGLVL_API, " sourceid = 0x%x\n", afd->sourceid); dprintk(DBGLVL_API, " controlsize = 0x%x\n", afd->controlsize); if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->audfeat, afd, sizeof(struct tmComResAFeatureDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); break; case 
ENCODER_UNIT: edh = (struct tmComResEncoderDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " ENCODER_UNIT\n"); dprintk(DBGLVL_API, " subtype = 0x%x\n", edh->subtype); dprintk(DBGLVL_API, " unitid = 0x%x\n", edh->unitid); dprintk(DBGLVL_API, " vsourceid = 0x%x\n", edh->vsourceid); dprintk(DBGLVL_API, " asourceid = 0x%x\n", edh->asourceid); dprintk(DBGLVL_API, " iunit = 0x%x\n", edh->iunit); if (edh->iunit == edh->unitid) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->encunit, edh, sizeof(struct tmComResEncoderDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); } break; case EXTENSION_UNIT: dprintk(DBGLVL_API, " EXTENSION_UNIT\n"); exthdr = (struct tmComResExtDevDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " unitid = 0x%x\n", exthdr->unitid); dprintk(DBGLVL_API, " deviceid = 0x%x\n", exthdr->deviceid); dprintk(DBGLVL_API, " devicetype = 0x%x\n", exthdr->devicetype); if (exthdr->devicetype & 0x1) dprintk(DBGLVL_API, " = Decoder Device\n"); if (exthdr->devicetype & 0x2) dprintk(DBGLVL_API, " = GPIO Source\n"); if (exthdr->devicetype & 0x4) dprintk(DBGLVL_API, " = Video Decoder\n"); if (exthdr->devicetype & 0x8) dprintk(DBGLVL_API, " = Audio Decoder\n"); if (exthdr->devicetype & 0x20) dprintk(DBGLVL_API, " = Crossbar\n"); if (exthdr->devicetype & 0x40) dprintk(DBGLVL_API, " = Tuner\n"); if (exthdr->devicetype & 0x80) dprintk(DBGLVL_API, " = IF PLL\n"); if (exthdr->devicetype & 0x100) dprintk(DBGLVL_API, " = Demodulator\n"); if (exthdr->devicetype & 0x200) dprintk(DBGLVL_API, " = RDS Decoder\n"); if (exthdr->devicetype & 0x400) dprintk(DBGLVL_API, " = Encoder\n"); if (exthdr->devicetype & 0x800) dprintk(DBGLVL_API, " = IR Decoder\n"); if (exthdr->devicetype & 0x1000) dprintk(DBGLVL_API, " = EEPROM\n"); if (exthdr->devicetype & 0x2000) dprintk(DBGLVL_API, " = VBI Decoder\n"); if (exthdr->devicetype & 0x10000) dprintk(DBGLVL_API, " = Streaming Device\n"); if 
(exthdr->devicetype & 0x20000) dprintk(DBGLVL_API, " = DRM Device\n"); if (exthdr->devicetype & 0x40000000) dprintk(DBGLVL_API, " = Generic Device\n"); if (exthdr->devicetype & 0x80000000) dprintk(DBGLVL_API, " = Config Space Device\n"); dprintk(DBGLVL_API, " numgpiopins = 0x%x\n", exthdr->numgpiopins); dprintk(DBGLVL_API, " numgpiogroups = 0x%x\n", exthdr->numgpiogroups); dprintk(DBGLVL_API, " controlsize = 0x%x\n", exthdr->controlsize); if (exthdr->devicetype & 0x80) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->ifunit, exthdr, sizeof(struct tmComResExtDevDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); } break; case PVC_INFRARED_UNIT: dprintk(DBGLVL_API, " PVC_INFRARED_UNIT\n"); break; case DRM_UNIT: dprintk(DBGLVL_API, " DRM_UNIT\n"); break; default: dprintk(DBGLVL_API, "default %d\n", hdr->subtype); } dprintk(DBGLVL_API, " 1.%x\n", hdr->len); dprintk(DBGLVL_API, " 2.%x\n", hdr->type); dprintk(DBGLVL_API, " 3.%x\n", hdr->subtype); dprintk(DBGLVL_API, " 4.%x\n", hdr->unitid); idx += hdr->len; } return 0; } int saa7164_api_enum_subdevs(struct saa7164_dev *dev) { int ret; u32 buflen = 0; u8 *buf; dprintk(DBGLVL_API, "%s()\n", __func__); /* Get the total descriptor length */ ret = saa7164_cmd_send(dev, 0, GET_LEN, GET_DESCRIPTORS_CONTROL, sizeof(buflen), &buflen); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_API, "%s() total descriptor size = %d bytes.\n", __func__, buflen); /* Allocate enough storage for all of the descs */ buf = kzalloc(buflen, GFP_KERNEL); if (!buf) return SAA_ERR_NO_RESOURCES; /* Retrieve them */ ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_DESCRIPTORS_CONTROL, buflen, buf); if (ret != SAA_OK) { printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); goto out; } if (saa_debug & DBGLVL_API) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, buf, buflen & ~15, false); 
saa7164_api_dump_subdevs(dev, buf, buflen); out: kfree(buf); return ret; } int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg, u32 datalen, u8 *data) { struct saa7164_dev *dev = bus->dev; u16 len = 0; int unitid; u8 buf[256]; int ret; dprintk(DBGLVL_API, "%s()\n", __func__); if (reglen > 4) return -EIO; /* Prepare the send buffer */ /* Bytes 00-03 source register length * 04-07 source bytes to read * 08... register address */ memset(buf, 0, sizeof(buf)); memcpy((buf + 2 * sizeof(u32) + 0), reg, reglen); *((u32 *)(buf + 0 * sizeof(u32))) = reglen; *((u32 *)(buf + 1 * sizeof(u32))) = datalen; unitid = saa7164_i2caddr_to_unitid(bus, addr); if (unitid < 0) { printk(KERN_ERR "%s() error, cannot translate regaddr 0x%x to unitid\n", __func__, addr); return -EIO; } ret = saa7164_cmd_send(bus->dev, unitid, GET_LEN, EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len); if (ret != SAA_OK) { printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret); return -EIO; } dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len); if (saa_debug & DBGLVL_I2C) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, buf, 32, false); ret = saa7164_cmd_send(bus->dev, unitid, GET_CUR, EXU_REGISTER_ACCESS_CONTROL, len, &buf); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret); else { if (saa_debug & DBGLVL_I2C) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, buf, sizeof(buf), false); memcpy(data, (buf + 2 * sizeof(u32) + reglen), datalen); } return ret == SAA_OK ? 
0 : -EIO; } /* For a given 8 bit i2c address device, write the buffer */ int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen, u8 *data) { struct saa7164_dev *dev = bus->dev; u16 len = 0; int unitid; int reglen; u8 buf[256]; int ret; dprintk(DBGLVL_API, "%s()\n", __func__); if ((datalen == 0) || (datalen > 232)) return -EIO; memset(buf, 0, sizeof(buf)); unitid = saa7164_i2caddr_to_unitid(bus, addr); if (unitid < 0) { printk(KERN_ERR "%s() error, cannot translate regaddr 0x%x to unitid\n", __func__, addr); return -EIO; } reglen = saa7164_i2caddr_to_reglen(bus, addr); if (reglen < 0) { printk(KERN_ERR "%s() error, cannot translate regaddr to reglen\n", __func__); return -EIO; } ret = saa7164_cmd_send(bus->dev, unitid, GET_LEN, EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len); if (ret != SAA_OK) { printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret); return -EIO; } dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len); /* Prepare the send buffer */ /* Bytes 00-03 dest register length * 04-07 dest bytes to write * 08... register address */ *((u32 *)(buf + 0 * sizeof(u32))) = reglen; *((u32 *)(buf + 1 * sizeof(u32))) = datalen - reglen; memcpy((buf + 2 * sizeof(u32)), data, datalen); if (saa_debug & DBGLVL_I2C) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, buf, sizeof(buf), false); ret = saa7164_cmd_send(bus->dev, unitid, SET_CUR, EXU_REGISTER_ACCESS_CONTROL, len, &buf); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret); return ret == SAA_OK ? 
0 : -EIO; } static int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid, u8 pin, u8 state) { int ret; struct tmComResGPIO t; dprintk(DBGLVL_API, "%s(0x%x, %d, %d)\n", __func__, unitid, pin, state); if ((pin > 7) || (state > 2)) return SAA_ERR_BAD_PARAMETER; t.pin = pin; t.state = state; ret = saa7164_cmd_send(dev, unitid, SET_CUR, EXU_GPIO_CONTROL, sizeof(t), &t); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_set_gpiobit(struct saa7164_dev *dev, u8 unitid, u8 pin) { return saa7164_api_modify_gpio(dev, unitid, pin, 1); } int saa7164_api_clear_gpiobit(struct saa7164_dev *dev, u8 unitid, u8 pin) { return saa7164_api_modify_gpio(dev, unitid, pin, 0); }
gpl-2.0
sktjdgns1189/android_kernel_samsung_SHV-E160S
drivers/mfd/max8925-core.c
2945
17496
/* * Base driver for Maxim MAX8925 * * Copyright (C) 2009-2010 Marvell International Ltd. * Haojian Zhuang <haojian.zhuang@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/mfd/core.h> #include <linux/mfd/max8925.h> static struct resource backlight_resources[] = { { .name = "max8925-backlight", .start = MAX8925_WLED_MODE_CNTL, .end = MAX8925_WLED_CNTL, .flags = IORESOURCE_IO, }, }; static struct mfd_cell backlight_devs[] = { { .name = "max8925-backlight", .num_resources = 1, .resources = &backlight_resources[0], .id = -1, }, }; static struct resource touch_resources[] = { { .name = "max8925-tsc", .start = MAX8925_TSC_IRQ, .end = MAX8925_ADC_RES_END, .flags = IORESOURCE_IO, }, }; static struct mfd_cell touch_devs[] = { { .name = "max8925-touch", .num_resources = 1, .resources = &touch_resources[0], .id = -1, }, }; static struct resource power_supply_resources[] = { { .name = "max8925-power", .start = MAX8925_CHG_IRQ1, .end = MAX8925_CHG_IRQ1_MASK, .flags = IORESOURCE_IO, }, }; static struct mfd_cell power_devs[] = { { .name = "max8925-power", .num_resources = 1, .resources = &power_supply_resources[0], .id = -1, }, }; static struct resource rtc_resources[] = { { .name = "max8925-rtc", .start = MAX8925_RTC_IRQ, .end = MAX8925_RTC_IRQ_MASK, .flags = IORESOURCE_IO, }, }; static struct mfd_cell rtc_devs[] = { { .name = "max8925-rtc", .num_resources = 1, .resources = &rtc_resources[0], .id = -1, }, }; static struct resource onkey_resources[] = { { .name = "max8925-onkey", .start = MAX8925_IRQ_GPM_SW_R, .end = MAX8925_IRQ_GPM_SW_R, .flags = IORESOURCE_IRQ, }, { .name = "max8925-onkey", .start = MAX8925_IRQ_GPM_SW_F, .end = MAX8925_IRQ_GPM_SW_F, 
.flags = IORESOURCE_IRQ, }, }; static struct mfd_cell onkey_devs[] = { { .name = "max8925-onkey", .num_resources = 2, .resources = &onkey_resources[0], .id = -1, }, }; #define MAX8925_REG_RESOURCE(_start, _end) \ { \ .start = MAX8925_##_start, \ .end = MAX8925_##_end, \ .flags = IORESOURCE_IO, \ } static struct resource regulator_resources[] = { MAX8925_REG_RESOURCE(SDCTL1, SDCTL1), MAX8925_REG_RESOURCE(SDCTL2, SDCTL2), MAX8925_REG_RESOURCE(SDCTL3, SDCTL3), MAX8925_REG_RESOURCE(LDOCTL1, LDOCTL1), MAX8925_REG_RESOURCE(LDOCTL2, LDOCTL2), MAX8925_REG_RESOURCE(LDOCTL3, LDOCTL3), MAX8925_REG_RESOURCE(LDOCTL4, LDOCTL4), MAX8925_REG_RESOURCE(LDOCTL5, LDOCTL5), MAX8925_REG_RESOURCE(LDOCTL6, LDOCTL6), MAX8925_REG_RESOURCE(LDOCTL7, LDOCTL7), MAX8925_REG_RESOURCE(LDOCTL8, LDOCTL8), MAX8925_REG_RESOURCE(LDOCTL9, LDOCTL9), MAX8925_REG_RESOURCE(LDOCTL10, LDOCTL10), MAX8925_REG_RESOURCE(LDOCTL11, LDOCTL11), MAX8925_REG_RESOURCE(LDOCTL12, LDOCTL12), MAX8925_REG_RESOURCE(LDOCTL13, LDOCTL13), MAX8925_REG_RESOURCE(LDOCTL14, LDOCTL14), MAX8925_REG_RESOURCE(LDOCTL15, LDOCTL15), MAX8925_REG_RESOURCE(LDOCTL16, LDOCTL16), MAX8925_REG_RESOURCE(LDOCTL17, LDOCTL17), MAX8925_REG_RESOURCE(LDOCTL18, LDOCTL18), MAX8925_REG_RESOURCE(LDOCTL19, LDOCTL19), MAX8925_REG_RESOURCE(LDOCTL20, LDOCTL20), }; #define MAX8925_REG_DEVS(_id) \ { \ .name = "max8925-regulator", \ .num_resources = 1, \ .resources = &regulator_resources[MAX8925_ID_##_id], \ .id = MAX8925_ID_##_id, \ } static struct mfd_cell regulator_devs[] = { MAX8925_REG_DEVS(SD1), MAX8925_REG_DEVS(SD2), MAX8925_REG_DEVS(SD3), MAX8925_REG_DEVS(LDO1), MAX8925_REG_DEVS(LDO2), MAX8925_REG_DEVS(LDO3), MAX8925_REG_DEVS(LDO4), MAX8925_REG_DEVS(LDO5), MAX8925_REG_DEVS(LDO6), MAX8925_REG_DEVS(LDO7), MAX8925_REG_DEVS(LDO8), MAX8925_REG_DEVS(LDO9), MAX8925_REG_DEVS(LDO10), MAX8925_REG_DEVS(LDO11), MAX8925_REG_DEVS(LDO12), MAX8925_REG_DEVS(LDO13), MAX8925_REG_DEVS(LDO14), MAX8925_REG_DEVS(LDO15), MAX8925_REG_DEVS(LDO16), MAX8925_REG_DEVS(LDO17), 
MAX8925_REG_DEVS(LDO18), MAX8925_REG_DEVS(LDO19), MAX8925_REG_DEVS(LDO20), }; enum { FLAGS_ADC = 1, /* register in ADC component */ FLAGS_RTC, /* register in RTC component */ }; struct max8925_irq_data { int reg; int mask_reg; int enable; /* enable or not */ int offs; /* bit offset in mask register */ int flags; int tsc_irq; }; static struct max8925_irq_data max8925_irqs[] = { [MAX8925_IRQ_VCHG_DC_OVP] = { .reg = MAX8925_CHG_IRQ1, .mask_reg = MAX8925_CHG_IRQ1_MASK, .offs = 1 << 0, }, [MAX8925_IRQ_VCHG_DC_F] = { .reg = MAX8925_CHG_IRQ1, .mask_reg = MAX8925_CHG_IRQ1_MASK, .offs = 1 << 1, }, [MAX8925_IRQ_VCHG_DC_R] = { .reg = MAX8925_CHG_IRQ1, .mask_reg = MAX8925_CHG_IRQ1_MASK, .offs = 1 << 2, }, [MAX8925_IRQ_VCHG_USB_OVP] = { .reg = MAX8925_CHG_IRQ1, .mask_reg = MAX8925_CHG_IRQ1_MASK, .offs = 1 << 3, }, [MAX8925_IRQ_VCHG_USB_F] = { .reg = MAX8925_CHG_IRQ1, .mask_reg = MAX8925_CHG_IRQ1_MASK, .offs = 1 << 4, }, [MAX8925_IRQ_VCHG_USB_R] = { .reg = MAX8925_CHG_IRQ1, .mask_reg = MAX8925_CHG_IRQ1_MASK, .offs = 1 << 5, }, [MAX8925_IRQ_VCHG_THM_OK_R] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 0, }, [MAX8925_IRQ_VCHG_THM_OK_F] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 1, }, [MAX8925_IRQ_VCHG_SYSLOW_F] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 2, }, [MAX8925_IRQ_VCHG_SYSLOW_R] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 3, }, [MAX8925_IRQ_VCHG_RST] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 4, }, [MAX8925_IRQ_VCHG_DONE] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 5, }, [MAX8925_IRQ_VCHG_TOPOFF] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 6, }, [MAX8925_IRQ_VCHG_TMR_FAULT] = { .reg = MAX8925_CHG_IRQ2, .mask_reg = MAX8925_CHG_IRQ2_MASK, .offs = 1 << 7, }, [MAX8925_IRQ_GPM_RSTIN] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = 
MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 0, }, [MAX8925_IRQ_GPM_MPL] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 1, }, [MAX8925_IRQ_GPM_SW_3SEC] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 2, }, [MAX8925_IRQ_GPM_EXTON_F] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 3, }, [MAX8925_IRQ_GPM_EXTON_R] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 4, }, [MAX8925_IRQ_GPM_SW_1SEC] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 5, }, [MAX8925_IRQ_GPM_SW_F] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 6, }, [MAX8925_IRQ_GPM_SW_R] = { .reg = MAX8925_ON_OFF_IRQ1, .mask_reg = MAX8925_ON_OFF_IRQ1_MASK, .offs = 1 << 7, }, [MAX8925_IRQ_GPM_SYSCKEN_F] = { .reg = MAX8925_ON_OFF_IRQ2, .mask_reg = MAX8925_ON_OFF_IRQ2_MASK, .offs = 1 << 0, }, [MAX8925_IRQ_GPM_SYSCKEN_R] = { .reg = MAX8925_ON_OFF_IRQ2, .mask_reg = MAX8925_ON_OFF_IRQ2_MASK, .offs = 1 << 1, }, [MAX8925_IRQ_RTC_ALARM1] = { .reg = MAX8925_RTC_IRQ, .mask_reg = MAX8925_RTC_IRQ_MASK, .offs = 1 << 2, .flags = FLAGS_RTC, }, [MAX8925_IRQ_RTC_ALARM0] = { .reg = MAX8925_RTC_IRQ, .mask_reg = MAX8925_RTC_IRQ_MASK, .offs = 1 << 3, .flags = FLAGS_RTC, }, [MAX8925_IRQ_TSC_STICK] = { .reg = MAX8925_TSC_IRQ, .mask_reg = MAX8925_TSC_IRQ_MASK, .offs = 1 << 0, .flags = FLAGS_ADC, .tsc_irq = 1, }, [MAX8925_IRQ_TSC_NSTICK] = { .reg = MAX8925_TSC_IRQ, .mask_reg = MAX8925_TSC_IRQ_MASK, .offs = 1 << 1, .flags = FLAGS_ADC, .tsc_irq = 1, }, }; static inline struct max8925_irq_data *irq_to_max8925(struct max8925_chip *chip, int irq) { return &max8925_irqs[irq - chip->irq_base]; } static irqreturn_t max8925_irq(int irq, void *data) { struct max8925_chip *chip = data; struct max8925_irq_data *irq_data; struct i2c_client *i2c; int read_reg = -1, value = 0; int i; for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { irq_data = 
&max8925_irqs[i]; /* TSC IRQ should be serviced in max8925_tsc_irq() */ if (irq_data->tsc_irq) continue; if (irq_data->flags == FLAGS_RTC) i2c = chip->rtc; else if (irq_data->flags == FLAGS_ADC) i2c = chip->adc; else i2c = chip->i2c; if (read_reg != irq_data->reg) { read_reg = irq_data->reg; value = max8925_reg_read(i2c, irq_data->reg); } if (value & irq_data->enable) handle_nested_irq(chip->irq_base + i); } return IRQ_HANDLED; } static irqreturn_t max8925_tsc_irq(int irq, void *data) { struct max8925_chip *chip = data; struct max8925_irq_data *irq_data; struct i2c_client *i2c; int read_reg = -1, value = 0; int i; for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { irq_data = &max8925_irqs[i]; /* non TSC IRQ should be serviced in max8925_irq() */ if (!irq_data->tsc_irq) continue; if (irq_data->flags == FLAGS_RTC) i2c = chip->rtc; else if (irq_data->flags == FLAGS_ADC) i2c = chip->adc; else i2c = chip->i2c; if (read_reg != irq_data->reg) { read_reg = irq_data->reg; value = max8925_reg_read(i2c, irq_data->reg); } if (value & irq_data->enable) handle_nested_irq(chip->irq_base + i); } return IRQ_HANDLED; } static void max8925_irq_lock(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); mutex_lock(&chip->irq_lock); } static void max8925_irq_sync_unlock(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); struct max8925_irq_data *irq_data; static unsigned char cache_chg[2] = {0xff, 0xff}; static unsigned char cache_on[2] = {0xff, 0xff}; static unsigned char cache_rtc = 0xff, cache_tsc = 0xff; unsigned char irq_chg[2], irq_on[2]; unsigned char irq_rtc, irq_tsc; int i; /* Load cached value. 
In initial, all IRQs are masked */ irq_chg[0] = cache_chg[0]; irq_chg[1] = cache_chg[1]; irq_on[0] = cache_on[0]; irq_on[1] = cache_on[1]; irq_rtc = cache_rtc; irq_tsc = cache_tsc; for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { irq_data = &max8925_irqs[i]; /* 1 -- disable, 0 -- enable */ switch (irq_data->mask_reg) { case MAX8925_CHG_IRQ1_MASK: irq_chg[0] &= ~irq_data->enable; break; case MAX8925_CHG_IRQ2_MASK: irq_chg[1] &= ~irq_data->enable; break; case MAX8925_ON_OFF_IRQ1_MASK: irq_on[0] &= ~irq_data->enable; break; case MAX8925_ON_OFF_IRQ2_MASK: irq_on[1] &= ~irq_data->enable; break; case MAX8925_RTC_IRQ_MASK: irq_rtc &= ~irq_data->enable; break; case MAX8925_TSC_IRQ_MASK: irq_tsc &= ~irq_data->enable; break; default: dev_err(chip->dev, "wrong IRQ\n"); break; } } /* update mask into registers */ if (cache_chg[0] != irq_chg[0]) { cache_chg[0] = irq_chg[0]; max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, irq_chg[0]); } if (cache_chg[1] != irq_chg[1]) { cache_chg[1] = irq_chg[1]; max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ2_MASK, irq_chg[1]); } if (cache_on[0] != irq_on[0]) { cache_on[0] = irq_on[0]; max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, irq_on[0]); } if (cache_on[1] != irq_on[1]) { cache_on[1] = irq_on[1]; max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, irq_on[1]); } if (cache_rtc != irq_rtc) { cache_rtc = irq_rtc; max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, irq_rtc); } if (cache_tsc != irq_tsc) { cache_tsc = irq_tsc; max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, irq_tsc); } mutex_unlock(&chip->irq_lock); } static void max8925_irq_enable(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); max8925_irqs[data->irq - chip->irq_base].enable = max8925_irqs[data->irq - chip->irq_base].offs; } static void max8925_irq_disable(struct irq_data *data) { struct max8925_chip *chip = irq_data_get_irq_chip_data(data); max8925_irqs[data->irq - chip->irq_base].enable = 0; } static struct irq_chip 
max8925_irq_chip = { .name = "max8925", .irq_bus_lock = max8925_irq_lock, .irq_bus_sync_unlock = max8925_irq_sync_unlock, .irq_enable = max8925_irq_enable, .irq_disable = max8925_irq_disable, }; static int max8925_irq_init(struct max8925_chip *chip, int irq, struct max8925_platform_data *pdata) { unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT; int i, ret; int __irq; if (!pdata || !pdata->irq_base) { dev_warn(chip->dev, "No interrupt support on IRQ base\n"); return -EINVAL; } /* clear all interrupts */ max8925_reg_read(chip->i2c, MAX8925_CHG_IRQ1); max8925_reg_read(chip->i2c, MAX8925_CHG_IRQ2); max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ1); max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ2); max8925_reg_read(chip->rtc, MAX8925_RTC_IRQ); max8925_reg_read(chip->adc, MAX8925_TSC_IRQ); /* mask all interrupts except for TSC */ max8925_reg_write(chip->rtc, MAX8925_ALARM0_CNTL, 0); max8925_reg_write(chip->rtc, MAX8925_ALARM1_CNTL, 0); max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, 0xff); max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ2_MASK, 0xff); max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff); max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, 0xff); max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff); mutex_init(&chip->irq_lock); chip->core_irq = irq; chip->irq_base = pdata->irq_base; /* register with genirq */ for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { __irq = i + chip->irq_base; irq_set_chip_data(__irq, chip); irq_set_chip_and_handler(__irq, &max8925_irq_chip, handle_edge_irq); irq_set_nested_thread(__irq, 1); #ifdef CONFIG_ARM set_irq_flags(__irq, IRQF_VALID); #else irq_set_noprobe(__irq); #endif } if (!irq) { dev_warn(chip->dev, "No interrupt support on core IRQ\n"); goto tsc_irq; } ret = request_threaded_irq(irq, NULL, max8925_irq, flags, "max8925", chip); if (ret) { dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret); chip->core_irq = 0; } tsc_irq: /* mask TSC interrupt */ max8925_reg_write(chip->adc, 
MAX8925_TSC_IRQ_MASK, 0x0f); if (!pdata->tsc_irq) { dev_warn(chip->dev, "No interrupt support on TSC IRQ\n"); return 0; } chip->tsc_irq = pdata->tsc_irq; ret = request_threaded_irq(chip->tsc_irq, NULL, max8925_tsc_irq, flags, "max8925-tsc", chip); if (ret) { dev_err(chip->dev, "Failed to request TSC IRQ: %d\n", ret); chip->tsc_irq = 0; } return 0; } int __devinit max8925_device_init(struct max8925_chip *chip, struct max8925_platform_data *pdata) { int ret; max8925_irq_init(chip, chip->i2c->irq, pdata); if (pdata && (pdata->power || pdata->touch)) { /* enable ADC to control internal reference */ max8925_set_bits(chip->i2c, MAX8925_RESET_CNFG, 1, 1); /* enable internal reference for ADC */ max8925_set_bits(chip->adc, MAX8925_TSC_CNFG1, 3, 2); /* check for internal reference IRQ */ do { ret = max8925_reg_read(chip->adc, MAX8925_TSC_IRQ); } while (ret & MAX8925_NREF_OK); /* enaable ADC scheduler, interval is 1 second */ max8925_set_bits(chip->adc, MAX8925_ADC_SCHED, 3, 2); } /* enable Momentary Power Loss */ max8925_set_bits(chip->rtc, MAX8925_MPL_CNTL, 1 << 4, 1 << 4); ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0], ARRAY_SIZE(rtc_devs), &rtc_resources[0], 0); if (ret < 0) { dev_err(chip->dev, "Failed to add rtc subdev\n"); goto out; } ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0], ARRAY_SIZE(onkey_devs), &onkey_resources[0], 0); if (ret < 0) { dev_err(chip->dev, "Failed to add onkey subdev\n"); goto out_dev; } if (pdata) { ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0], ARRAY_SIZE(regulator_devs), &regulator_resources[0], 0); if (ret < 0) { dev_err(chip->dev, "Failed to add regulator subdev\n"); goto out_dev; } } if (pdata && pdata->backlight) { ret = mfd_add_devices(chip->dev, 0, &backlight_devs[0], ARRAY_SIZE(backlight_devs), &backlight_resources[0], 0); if (ret < 0) { dev_err(chip->dev, "Failed to add backlight subdev\n"); goto out_dev; } } if (pdata && pdata->power) { ret = mfd_add_devices(chip->dev, 0, &power_devs[0], ARRAY_SIZE(power_devs), 
&power_supply_resources[0], 0); if (ret < 0) { dev_err(chip->dev, "Failed to add power supply " "subdev\n"); goto out_dev; } } if (pdata && pdata->touch) { ret = mfd_add_devices(chip->dev, 0, &touch_devs[0], ARRAY_SIZE(touch_devs), &touch_resources[0], 0); if (ret < 0) { dev_err(chip->dev, "Failed to add touch subdev\n"); goto out_dev; } } return 0; out_dev: mfd_remove_devices(chip->dev); out: return ret; } void __devexit max8925_device_exit(struct max8925_chip *chip) { if (chip->core_irq) free_irq(chip->core_irq, chip); if (chip->tsc_irq) free_irq(chip->tsc_irq, chip); mfd_remove_devices(chip->dev); } MODULE_DESCRIPTION("PMIC Driver for Maxim MAX8925"); MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com"); MODULE_LICENSE("GPL");
gpl-2.0
Menpiko/SnaPKernel-N
drivers/media/pci/cx23885/altera-ci.c
2945
20984
/* * altera-ci.c * * CI driver in conjunction with NetUp Dual DVB-T/C RF CI card * * Copyright (C) 2010,2011 NetUP Inc. * Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * currently cx23885 GPIO's used. * GPIO-0 ~INT in * GPIO-1 TMS out * GPIO-2 ~reset chips out * GPIO-3 to GPIO-10 data/addr for CA in/out * GPIO-11 ~CS out * GPIO-12 AD_RG out * GPIO-13 ~WR out * GPIO-14 ~RD out * GPIO-15 ~RDY in * GPIO-16 TCK out * GPIO-17 TDO in * GPIO-18 TDI out */ /* * Bit definitions for MC417_RWD and MC417_OEN registers * bits 31-16 * +-----------+ * | Reserved | * +-----------+ * bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | TDI | TDO | TCK | RDY# | #RD | #WR | AD_RG | #CS | * +-------+-------+-------+-------+-------+-------+-------+-------+ * bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0| * +-------+-------+-------+-------+-------+-------+-------+-------+ */ #include <media/videobuf-dma-sg.h> #include <media/videobuf-dvb.h> #include "altera-ci.h" #include "dvb_ca_en50221.h" /* FPGA regs */ #define NETUP_CI_INT_CTRL 0x00 #define NETUP_CI_BUSCTRL2 0x01 #define NETUP_CI_ADDR0 
0x04 #define NETUP_CI_ADDR1 0x05 #define NETUP_CI_DATA 0x06 #define NETUP_CI_BUSCTRL 0x07 #define NETUP_CI_PID_ADDR0 0x08 #define NETUP_CI_PID_ADDR1 0x09 #define NETUP_CI_PID_DATA 0x0a #define NETUP_CI_TSA_DIV 0x0c #define NETUP_CI_TSB_DIV 0x0d #define NETUP_CI_REVISION 0x0f /* const for ci op */ #define NETUP_CI_FLG_CTL 1 #define NETUP_CI_FLG_RD 1 #define NETUP_CI_FLG_AD 1 static unsigned int ci_dbg; module_param(ci_dbg, int, 0644); MODULE_PARM_DESC(ci_dbg, "Enable CI debugging"); static unsigned int pid_dbg; module_param(pid_dbg, int, 0644); MODULE_PARM_DESC(pid_dbg, "Enable PID filtering debugging"); MODULE_DESCRIPTION("altera FPGA CI module"); MODULE_AUTHOR("Igor M. Liplianin <liplianin@netup.ru>"); MODULE_LICENSE("GPL"); #define ci_dbg_print(args...) \ do { \ if (ci_dbg) \ printk(KERN_DEBUG args); \ } while (0) #define pid_dbg_print(args...) \ do { \ if (pid_dbg) \ printk(KERN_DEBUG args); \ } while (0) struct altera_ci_state; struct netup_hw_pid_filter; struct fpga_internal { void *dev; struct mutex fpga_mutex;/* two CI's on the same fpga */ struct netup_hw_pid_filter *pid_filt[2]; struct altera_ci_state *state[2]; struct work_struct work; int (*fpga_rw) (void *dev, int flag, int data, int rw); int cis_used; int filts_used; int strt_wrk; }; /* stores all private variables for communication with CI */ struct altera_ci_state { struct fpga_internal *internal; struct dvb_ca_en50221 ca; int status; int nr; }; /* stores all private variables for hardware pid filtering */ struct netup_hw_pid_filter { struct fpga_internal *internal; struct dvb_demux *demux; /* save old functions */ int (*start_feed)(struct dvb_demux_feed *feed); int (*stop_feed)(struct dvb_demux_feed *feed); int status; int nr; }; /* internal params node */ struct fpga_inode { /* pointer for internal params, one for each pair of CI's */ struct fpga_internal *internal; struct fpga_inode *next_inode; }; /* first internal params */ static struct fpga_inode *fpga_first_inode; /* find chip by dev */ 
static struct fpga_inode *find_inode(void *dev) { struct fpga_inode *temp_chip = fpga_first_inode; if (temp_chip == NULL) return temp_chip; /* Search for the last fpga CI chip or find it by dev */ while ((temp_chip != NULL) && (temp_chip->internal->dev != dev)) temp_chip = temp_chip->next_inode; return temp_chip; } /* check demux */ static struct fpga_internal *check_filter(struct fpga_internal *temp_int, void *demux_dev, int filt_nr) { if (temp_int == NULL) return NULL; if ((temp_int->pid_filt[filt_nr]) == NULL) return NULL; if (temp_int->pid_filt[filt_nr]->demux == demux_dev) return temp_int; return NULL; } /* find chip by demux */ static struct fpga_inode *find_dinode(void *demux_dev) { struct fpga_inode *temp_chip = fpga_first_inode; struct fpga_internal *temp_int; /* * Search of the last fpga CI chip or * find it by demux */ while (temp_chip != NULL) { if (temp_chip->internal != NULL) { temp_int = temp_chip->internal; if (check_filter(temp_int, demux_dev, 0)) break; if (check_filter(temp_int, demux_dev, 1)) break; } temp_chip = temp_chip->next_inode; } return temp_chip; } /* deallocating chip */ static void remove_inode(struct fpga_internal *internal) { struct fpga_inode *prev_node = fpga_first_inode; struct fpga_inode *del_node = find_inode(internal->dev); if (del_node != NULL) { if (del_node == fpga_first_inode) { fpga_first_inode = del_node->next_inode; } else { while (prev_node->next_inode != del_node) prev_node = prev_node->next_inode; if (del_node->next_inode == NULL) prev_node->next_inode = NULL; else prev_node->next_inode = prev_node->next_inode->next_inode; } kfree(del_node); } } /* allocating new chip */ static struct fpga_inode *append_internal(struct fpga_internal *internal) { struct fpga_inode *new_node = fpga_first_inode; if (new_node == NULL) { new_node = kmalloc(sizeof(struct fpga_inode), GFP_KERNEL); fpga_first_inode = new_node; } else { while (new_node->next_inode != NULL) new_node = new_node->next_inode; new_node->next_inode = 
kmalloc(sizeof(struct fpga_inode), GFP_KERNEL); if (new_node->next_inode != NULL) new_node = new_node->next_inode; else new_node = NULL; } if (new_node != NULL) { new_node->internal = internal; new_node->next_inode = NULL; } return new_node; } static int netup_fpga_op_rw(struct fpga_internal *inter, int addr, u8 val, u8 read) { inter->fpga_rw(inter->dev, NETUP_CI_FLG_AD, addr, 0); return inter->fpga_rw(inter->dev, 0, val, read); } /* flag - mem/io, read - read/write */ static int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 flag, u8 read, int addr, u8 val) { struct altera_ci_state *state = en50221->data; struct fpga_internal *inter = state->internal; u8 store; int mem = 0; if (0 != slot) return -EINVAL; mutex_lock(&inter->fpga_mutex); netup_fpga_op_rw(inter, NETUP_CI_ADDR0, ((addr << 1) & 0xfe), 0); netup_fpga_op_rw(inter, NETUP_CI_ADDR1, ((addr >> 7) & 0x7f), 0); store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); store &= 0x0f; store |= ((state->nr << 7) | (flag << 6)); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, store, 0); mem = netup_fpga_op_rw(inter, NETUP_CI_DATA, val, read); mutex_unlock(&inter->fpga_mutex); ci_dbg_print("%s: %s: addr=[0x%02x], %s=%x\n", __func__, (read) ? "read" : "write", addr, (flag == NETUP_CI_FLG_CTL) ? "ctl" : "mem", (read) ? 
mem : val); return mem; } static int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221, int slot, int addr) { return altera_ci_op_cam(en50221, slot, 0, NETUP_CI_FLG_RD, addr, 0); } static int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221, int slot, int addr, u8 data) { return altera_ci_op_cam(en50221, slot, 0, 0, addr, data); } static int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr) { return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL, NETUP_CI_FLG_RD, addr, 0); } static int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr, u8 data) { return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL, 0, addr, data); } static int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot) { struct altera_ci_state *state = en50221->data; struct fpga_internal *inter = state->internal; /* reasonable timeout for CI reset is 10 seconds */ unsigned long t_out = jiffies + msecs_to_jiffies(9999); int ret; ci_dbg_print("%s\n", __func__); if (0 != slot) return -EINVAL; mutex_lock(&inter->fpga_mutex); ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, (ret & 0xcf) | (1 << (5 - state->nr)), 0); mutex_unlock(&inter->fpga_mutex); for (;;) { mdelay(50); mutex_lock(&inter->fpga_mutex); ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); mutex_unlock(&inter->fpga_mutex); if ((ret & (1 << (5 - state->nr))) == 0) break; if (time_after(jiffies, t_out)) break; } ci_dbg_print("%s: %d msecs\n", __func__, jiffies_to_msecs(jiffies + msecs_to_jiffies(9999) - t_out)); return 0; } static int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot) { /* not implemented */ return 0; } static int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot) { struct altera_ci_state *state = en50221->data; struct fpga_internal *inter = state->internal; int ret; ci_dbg_print("%s\n", __func__); if (0 != slot) return -EINVAL; 
mutex_lock(&inter->fpga_mutex); ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, (ret & 0x0f) | (1 << (3 - state->nr)), 0); mutex_unlock(&inter->fpga_mutex); return 0; } /* work handler */ static void netup_read_ci_status(struct work_struct *work) { struct fpga_internal *inter = container_of(work, struct fpga_internal, work); int ret; ci_dbg_print("%s\n", __func__); mutex_lock(&inter->fpga_mutex); /* ack' irq */ ret = netup_fpga_op_rw(inter, NETUP_CI_INT_CTRL, 0, NETUP_CI_FLG_RD); ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); mutex_unlock(&inter->fpga_mutex); if (inter->state[1] != NULL) { inter->state[1]->status = ((ret & 1) == 0 ? DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY : 0); ci_dbg_print("%s: setting CI[1] status = 0x%x\n", __func__, inter->state[1]->status); } if (inter->state[0] != NULL) { inter->state[0]->status = ((ret & 2) == 0 ? DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY : 0); ci_dbg_print("%s: setting CI[0] status = 0x%x\n", __func__, inter->state[0]->status); } } /* CI irq handler */ int altera_ci_irq(void *dev) { struct fpga_inode *temp_int = NULL; struct fpga_internal *inter = NULL; ci_dbg_print("%s\n", __func__); if (dev != NULL) { temp_int = find_inode(dev); if (temp_int != NULL) { inter = temp_int->internal; schedule_work(&inter->work); } } return 1; } EXPORT_SYMBOL(altera_ci_irq); static int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open) { struct altera_ci_state *state = en50221->data; if (0 != slot) return -EINVAL; return state->status; } static void altera_hw_filt_release(void *main_dev, int filt_nr) { struct fpga_inode *temp_int = find_inode(main_dev); struct netup_hw_pid_filter *pid_filt = NULL; ci_dbg_print("%s\n", __func__); if (temp_int != NULL) { pid_filt = temp_int->internal->pid_filt[filt_nr - 1]; /* stored old feed controls */ pid_filt->demux->start_feed = pid_filt->start_feed; 
pid_filt->demux->stop_feed = pid_filt->stop_feed; if (((--(temp_int->internal->filts_used)) <= 0) && ((temp_int->internal->cis_used) <= 0)) { ci_dbg_print("%s: Actually removing\n", __func__); remove_inode(temp_int->internal); kfree(pid_filt->internal); } kfree(pid_filt); } } EXPORT_SYMBOL(altera_hw_filt_release); void altera_ci_release(void *dev, int ci_nr) { struct fpga_inode *temp_int = find_inode(dev); struct altera_ci_state *state = NULL; ci_dbg_print("%s\n", __func__); if (temp_int != NULL) { state = temp_int->internal->state[ci_nr - 1]; altera_hw_filt_release(dev, ci_nr); if (((temp_int->internal->filts_used) <= 0) && ((--(temp_int->internal->cis_used)) <= 0)) { ci_dbg_print("%s: Actually removing\n", __func__); remove_inode(temp_int->internal); kfree(state->internal); } if (state != NULL) { if (state->ca.data != NULL) dvb_ca_en50221_release(&state->ca); kfree(state); } } } EXPORT_SYMBOL(altera_ci_release); static void altera_pid_control(struct netup_hw_pid_filter *pid_filt, u16 pid, int onoff) { struct fpga_internal *inter = pid_filt->internal; u8 store = 0; /* pid 0-0x1f always enabled, don't touch them */ if ((pid == 0x2000) || (pid < 0x20)) return; mutex_lock(&inter->fpga_mutex); netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR0, (pid >> 3) & 0xff, 0); netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR1, ((pid >> 11) & 0x03) | (pid_filt->nr << 2), 0); store = netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, 0, NETUP_CI_FLG_RD); if (onoff)/* 0 - on, 1 - off */ store |= (1 << (pid & 7)); else store &= ~(1 << (pid & 7)); netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, store, 0); mutex_unlock(&inter->fpga_mutex); pid_dbg_print("%s: (%d) set pid: %5d 0x%04x '%s'\n", __func__, pid_filt->nr, pid, pid, onoff ? "off" : "on"); } static void altera_toggle_fullts_streaming(struct netup_hw_pid_filter *pid_filt, int filt_nr, int onoff) { struct fpga_internal *inter = pid_filt->internal; u8 store = 0; int i; pid_dbg_print("%s: pid_filt->nr[%d] now %s\n", __func__, pid_filt->nr, onoff ? 
"off" : "on"); if (onoff)/* 0 - on, 1 - off */ store = 0xff;/* ignore pid */ else store = 0;/* enable pid */ mutex_lock(&inter->fpga_mutex); for (i = 0; i < 1024; i++) { netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR0, i & 0xff, 0); netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR1, ((i >> 8) & 0x03) | (pid_filt->nr << 2), 0); /* pid 0-0x1f always enabled */ netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, (i > 3 ? store : 0), 0); } mutex_unlock(&inter->fpga_mutex); } static int altera_pid_feed_control(void *demux_dev, int filt_nr, struct dvb_demux_feed *feed, int onoff) { struct fpga_inode *temp_int = find_dinode(demux_dev); struct fpga_internal *inter = temp_int->internal; struct netup_hw_pid_filter *pid_filt = inter->pid_filt[filt_nr - 1]; altera_pid_control(pid_filt, feed->pid, onoff ? 0 : 1); /* call old feed proc's */ if (onoff) pid_filt->start_feed(feed); else pid_filt->stop_feed(feed); if (feed->pid == 0x2000) altera_toggle_fullts_streaming(pid_filt, filt_nr, onoff ? 0 : 1); return 0; } EXPORT_SYMBOL(altera_pid_feed_control); static int altera_ci_start_feed(struct dvb_demux_feed *feed, int num) { altera_pid_feed_control(feed->demux, num, feed, 1); return 0; } static int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num) { altera_pid_feed_control(feed->demux, num, feed, 0); return 0; } static int altera_ci_start_feed_1(struct dvb_demux_feed *feed) { return altera_ci_start_feed(feed, 1); } static int altera_ci_stop_feed_1(struct dvb_demux_feed *feed) { return altera_ci_stop_feed(feed, 1); } static int altera_ci_start_feed_2(struct dvb_demux_feed *feed) { return altera_ci_start_feed(feed, 2); } static int altera_ci_stop_feed_2(struct dvb_demux_feed *feed) { return altera_ci_stop_feed(feed, 2); } static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr) { struct netup_hw_pid_filter *pid_filt = NULL; struct fpga_inode *temp_int = find_inode(config->dev); struct fpga_internal *inter = NULL; int ret = 0; pid_filt = kzalloc(sizeof(struct 
netup_hw_pid_filter), GFP_KERNEL); ci_dbg_print("%s\n", __func__); if (!pid_filt) { ret = -ENOMEM; goto err; } if (temp_int != NULL) { inter = temp_int->internal; (inter->filts_used)++; ci_dbg_print("%s: Find Internal Structure!\n", __func__); } else { inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL); if (!inter) { ret = -ENOMEM; goto err; } temp_int = append_internal(inter); inter->filts_used = 1; inter->dev = config->dev; inter->fpga_rw = config->fpga_rw; mutex_init(&inter->fpga_mutex); inter->strt_wrk = 1; ci_dbg_print("%s: Create New Internal Structure!\n", __func__); } ci_dbg_print("%s: setting hw pid filter = %p for ci = %d\n", __func__, pid_filt, hw_filt_nr - 1); inter->pid_filt[hw_filt_nr - 1] = pid_filt; pid_filt->demux = config->demux; pid_filt->internal = inter; pid_filt->nr = hw_filt_nr - 1; /* store old feed controls */ pid_filt->start_feed = config->demux->start_feed; pid_filt->stop_feed = config->demux->stop_feed; /* replace with new feed controls */ if (hw_filt_nr == 1) { pid_filt->demux->start_feed = altera_ci_start_feed_1; pid_filt->demux->stop_feed = altera_ci_stop_feed_1; } else if (hw_filt_nr == 2) { pid_filt->demux->start_feed = altera_ci_start_feed_2; pid_filt->demux->stop_feed = altera_ci_stop_feed_2; } altera_toggle_fullts_streaming(pid_filt, 0, 1); return 0; err: ci_dbg_print("%s: Can't init hardware filter: Error %d\n", __func__, ret); kfree(pid_filt); return ret; } EXPORT_SYMBOL(altera_hw_filt_init); int altera_ci_init(struct altera_ci_config *config, int ci_nr) { struct altera_ci_state *state; struct fpga_inode *temp_int = find_inode(config->dev); struct fpga_internal *inter = NULL; int ret = 0; u8 store = 0; state = kzalloc(sizeof(struct altera_ci_state), GFP_KERNEL); ci_dbg_print("%s\n", __func__); if (!state) { ret = -ENOMEM; goto err; } if (temp_int != NULL) { inter = temp_int->internal; (inter->cis_used)++; inter->fpga_rw = config->fpga_rw; ci_dbg_print("%s: Find Internal Structure!\n", __func__); } else { inter = 
kzalloc(sizeof(struct fpga_internal), GFP_KERNEL); if (!inter) { ret = -ENOMEM; goto err; } temp_int = append_internal(inter); inter->cis_used = 1; inter->dev = config->dev; inter->fpga_rw = config->fpga_rw; mutex_init(&inter->fpga_mutex); inter->strt_wrk = 1; ci_dbg_print("%s: Create New Internal Structure!\n", __func__); } ci_dbg_print("%s: setting state = %p for ci = %d\n", __func__, state, ci_nr - 1); state->internal = inter; state->nr = ci_nr - 1; state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = altera_ci_read_attribute_mem; state->ca.write_attribute_mem = altera_ci_write_attribute_mem; state->ca.read_cam_control = altera_ci_read_cam_ctl; state->ca.write_cam_control = altera_ci_write_cam_ctl; state->ca.slot_reset = altera_ci_slot_reset; state->ca.slot_shutdown = altera_ci_slot_shutdown; state->ca.slot_ts_enable = altera_ci_slot_ts_ctl; state->ca.poll_slot_status = altera_poll_ci_slot_status; state->ca.data = state; ret = dvb_ca_en50221_init(config->adapter, &state->ca, /* flags */ 0, /* n_slots */ 1); if (0 != ret) goto err; inter->state[ci_nr - 1] = state; altera_hw_filt_init(config, ci_nr); if (inter->strt_wrk) { INIT_WORK(&inter->work, netup_read_ci_status); inter->strt_wrk = 0; } ci_dbg_print("%s: CI initialized!\n", __func__); mutex_lock(&inter->fpga_mutex); /* Enable div */ netup_fpga_op_rw(inter, NETUP_CI_TSA_DIV, 0x0, 0); netup_fpga_op_rw(inter, NETUP_CI_TSB_DIV, 0x0, 0); /* enable TS out */ store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, 0, NETUP_CI_FLG_RD); store |= (3 << 4); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0); ret = netup_fpga_op_rw(inter, NETUP_CI_REVISION, 0, NETUP_CI_FLG_RD); /* enable irq */ netup_fpga_op_rw(inter, NETUP_CI_INT_CTRL, 0x44, 0); mutex_unlock(&inter->fpga_mutex); ci_dbg_print("%s: NetUP CI Revision = 0x%x\n", __func__, ret); schedule_work(&inter->work); return 0; err: ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret); kfree(state); return ret; } EXPORT_SYMBOL(altera_ci_init); int 
altera_ci_tuner_reset(void *dev, int ci_nr) { struct fpga_inode *temp_int = find_inode(dev); struct fpga_internal *inter = NULL; u8 store; ci_dbg_print("%s\n", __func__); if (temp_int == NULL) return -1; if (temp_int->internal == NULL) return -1; inter = temp_int->internal; mutex_lock(&inter->fpga_mutex); store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, 0, NETUP_CI_FLG_RD); store &= ~(4 << (2 - ci_nr)); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0); msleep(100); store |= (4 << (2 - ci_nr)); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0); mutex_unlock(&inter->fpga_mutex); return 0; } EXPORT_SYMBOL(altera_ci_tuner_reset);
gpl-2.0
XileForce/Vindicator-S6-Uni-Old
tools/perf/arch/x86/util/dwarf-regs.c
3713
1796
/* * dwarf-regs.c : Mapping of DWARF debug register numbers into register names. * Extracted from probe-finder.c * * Written by Masami Hiramatsu <mhiramat@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <stddef.h> #include <dwarf-regs.h> /* * Generic dwarf analysis helpers */ #define X86_32_MAX_REGS 8 const char *x86_32_regs_table[X86_32_MAX_REGS] = { "%ax", "%cx", "%dx", "%bx", "$stack", /* Stack address instead of %sp */ "%bp", "%si", "%di", }; #define X86_64_MAX_REGS 16 const char *x86_64_regs_table[X86_64_MAX_REGS] = { "%ax", "%dx", "%cx", "%bx", "%si", "%di", "%bp", "%sp", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", }; /* TODO: switching by dwarf address size */ #ifdef __x86_64__ #define ARCH_MAX_REGS X86_64_MAX_REGS #define arch_regs_table x86_64_regs_table #else #define ARCH_MAX_REGS X86_32_MAX_REGS #define arch_regs_table x86_32_regs_table #endif /* Return architecture dependent register string (for kprobe-tracer) */ const char *get_arch_regstr(unsigned int n) { return (n <= ARCH_MAX_REGS) ? arch_regs_table[n] : NULL; }
gpl-2.0
ARMP/bproj-black
drivers/isdn/hardware/avm/b1.c
4225
21011
/* $Id: b1.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $ * * Common module for AVM B1 cards. * * Copyright 1999 by Carsten Paeth <calle@calle.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/capi.h> #include <linux/kernelcapi.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/init.h> #include <asm/uaccess.h> #include <linux/netdevice.h> #include <linux/isdn/capilli.h> #include "avmcard.h" #include <linux/isdn/capicmd.h> #include <linux/isdn/capiutil.h> static char *revision = "$Revision: 1.1.2.2 $"; /* ------------------------------------------------------------- */ MODULE_DESCRIPTION("CAPI4Linux: Common support for active AVM cards"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); /* ------------------------------------------------------------- */ int b1_irq_table[16] = {0, 0, 0, 192, /* irq 3 */ 32, /* irq 4 */ 160, /* irq 5 */ 96, /* irq 6 */ 224, /* irq 7 */ 0, 64, /* irq 9 */ 80, /* irq 10 */ 208, /* irq 11 */ 48, /* irq 12 */ 0, 0, 112, /* irq 15 */ }; /* ------------------------------------------------------------- */ avmcard *b1_alloc_card(int nr_controllers) { avmcard *card; avmctrl_info *cinfo; int i; card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) return NULL; cinfo = kzalloc(sizeof(*cinfo) * nr_controllers, GFP_KERNEL); if (!cinfo) { kfree(card); return NULL; } card->ctrlinfo = cinfo; for (i = 0; i < nr_controllers; i++) { INIT_LIST_HEAD(&cinfo[i].ncci_head); cinfo[i].card = card; } spin_lock_init(&card->lock); card->nr_controllers = nr_controllers; return card; } /* ------------------------------------------------------------- */ void b1_free_card(avmcard *card) 
{ kfree(card->ctrlinfo); kfree(card); } /* ------------------------------------------------------------- */ int b1_detect(unsigned int base, enum avmcardtype cardtype) { int onoff, i; /* * Statusregister 0000 00xx */ if ((inb(base + B1_INSTAT) & 0xfc) || (inb(base + B1_OUTSTAT) & 0xfc)) return 1; /* * Statusregister 0000 001x */ b1outp(base, B1_INSTAT, 0x2); /* enable irq */ /* b1outp(base, B1_OUTSTAT, 0x2); */ if ((inb(base + B1_INSTAT) & 0xfe) != 0x2 /* || (inb(base + B1_OUTSTAT) & 0xfe) != 0x2 */) return 2; /* * Statusregister 0000 000x */ b1outp(base, B1_INSTAT, 0x0); /* disable irq */ b1outp(base, B1_OUTSTAT, 0x0); if ((inb(base + B1_INSTAT) & 0xfe) || (inb(base + B1_OUTSTAT) & 0xfe)) return 3; for (onoff = !0, i= 0; i < 10 ; i++) { b1_set_test_bit(base, cardtype, onoff); if (b1_get_test_bit(base, cardtype) != onoff) return 4; onoff = !onoff; } if (cardtype == avm_m1) return 0; if ((b1_rd_reg(base, B1_STAT1(cardtype)) & 0x0f) != 0x01) return 5; return 0; } void b1_getrevision(avmcard *card) { card->class = inb(card->port + B1_ANALYSE); card->revision = inb(card->port + B1_REVISION); } #define FWBUF_SIZE 256 int b1_load_t4file(avmcard *card, capiloaddatapart * t4file) { unsigned char buf[FWBUF_SIZE]; unsigned char *dp; int i, left; unsigned int base = card->port; dp = t4file->data; left = t4file->len; while (left > FWBUF_SIZE) { if (t4file->user) { if (copy_from_user(buf, dp, FWBUF_SIZE)) return -EFAULT; } else { memcpy(buf, dp, FWBUF_SIZE); } for (i = 0; i < FWBUF_SIZE; i++) if (b1_save_put_byte(base, buf[i]) < 0) { printk(KERN_ERR "%s: corrupted firmware file ?\n", card->name); return -EIO; } left -= FWBUF_SIZE; dp += FWBUF_SIZE; } if (left) { if (t4file->user) { if (copy_from_user(buf, dp, left)) return -EFAULT; } else { memcpy(buf, dp, left); } for (i = 0; i < left; i++) if (b1_save_put_byte(base, buf[i]) < 0) { printk(KERN_ERR "%s: corrupted firmware file ?\n", card->name); return -EIO; } } return 0; } int b1_load_config(avmcard *card, capiloaddatapart * 
config) { unsigned char buf[FWBUF_SIZE]; unsigned char *dp; unsigned int base = card->port; int i, j, left; dp = config->data; left = config->len; if (left) { b1_put_byte(base, SEND_CONFIG); b1_put_word(base, 1); b1_put_byte(base, SEND_CONFIG); b1_put_word(base, left); } while (left > FWBUF_SIZE) { if (config->user) { if (copy_from_user(buf, dp, FWBUF_SIZE)) return -EFAULT; } else { memcpy(buf, dp, FWBUF_SIZE); } for (i = 0; i < FWBUF_SIZE; ) { b1_put_byte(base, SEND_CONFIG); for (j=0; j < 4; j++) { b1_put_byte(base, buf[i++]); } } left -= FWBUF_SIZE; dp += FWBUF_SIZE; } if (left) { if (config->user) { if (copy_from_user(buf, dp, left)) return -EFAULT; } else { memcpy(buf, dp, left); } for (i = 0; i < left; ) { b1_put_byte(base, SEND_CONFIG); for (j=0; j < 4; j++) { if (i < left) b1_put_byte(base, buf[i++]); else b1_put_byte(base, 0); } } } return 0; } int b1_loaded(avmcard *card) { unsigned int base = card->port; unsigned long stop; unsigned char ans; unsigned long tout = 2; for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) { if (b1_tx_empty(base)) break; } if (!b1_tx_empty(base)) { printk(KERN_ERR "%s: b1_loaded: tx err, corrupted t4 file ?\n", card->name); return 0; } b1_put_byte(base, SEND_POLL); for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) { if (b1_rx_full(base)) { if ((ans = b1_get_byte(base)) == RECEIVE_POLL) { return 1; } printk(KERN_ERR "%s: b1_loaded: got 0x%x, firmware not running\n", card->name, ans); return 0; } } printk(KERN_ERR "%s: b1_loaded: firmware not running\n", card->name); return 0; } /* ------------------------------------------------------------- */ int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) { avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); avmcard *card = cinfo->card; unsigned int port = card->port; unsigned long flags; int retval; b1_reset(port); if ((retval = b1_load_t4file(card, &data->firmware))) { b1_reset(port); printk(KERN_ERR "%s: failed to load t4file!!\n", card->name); 
return retval; } b1_disable_irq(port); if (data->configuration.len > 0 && data->configuration.data) { if ((retval = b1_load_config(card, &data->configuration))) { b1_reset(port); printk(KERN_ERR "%s: failed to load config!!\n", card->name); return retval; } } if (!b1_loaded(card)) { printk(KERN_ERR "%s: failed to load t4file.\n", card->name); return -EIO; } spin_lock_irqsave(&card->lock, flags); b1_setinterrupt(port, card->irq, card->cardtype); b1_put_byte(port, SEND_INIT); b1_put_word(port, CAPI_MAXAPPL); b1_put_word(port, AVM_NCCI_PER_CHANNEL*2); b1_put_word(port, ctrl->cnr - 1); spin_unlock_irqrestore(&card->lock, flags); return 0; } void b1_reset_ctr(struct capi_ctr *ctrl) { avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); avmcard *card = cinfo->card; unsigned int port = card->port; unsigned long flags; b1_reset(port); b1_reset(port); memset(cinfo->version, 0, sizeof(cinfo->version)); spin_lock_irqsave(&card->lock, flags); capilib_release(&cinfo->ncci_head); spin_unlock_irqrestore(&card->lock, flags); capi_ctr_down(ctrl); } void b1_register_appl(struct capi_ctr *ctrl, u16 appl, capi_register_params *rp) { avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); avmcard *card = cinfo->card; unsigned int port = card->port; unsigned long flags; int nconn, want = rp->level3cnt; if (want > 0) nconn = want; else nconn = ctrl->profile.nbchannel * -want; if (nconn == 0) nconn = ctrl->profile.nbchannel; spin_lock_irqsave(&card->lock, flags); b1_put_byte(port, SEND_REGISTER); b1_put_word(port, appl); b1_put_word(port, 1024 * (nconn+1)); b1_put_word(port, nconn); b1_put_word(port, rp->datablkcnt); b1_put_word(port, rp->datablklen); spin_unlock_irqrestore(&card->lock, flags); } void b1_release_appl(struct capi_ctr *ctrl, u16 appl) { avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); avmcard *card = cinfo->card; unsigned int port = card->port; unsigned long flags; spin_lock_irqsave(&card->lock, flags); capilib_release_appl(&cinfo->ncci_head, appl); 
b1_put_byte(port, SEND_RELEASE); b1_put_word(port, appl); spin_unlock_irqrestore(&card->lock, flags); } u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) { avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); avmcard *card = cinfo->card; unsigned int port = card->port; unsigned long flags; u16 len = CAPIMSG_LEN(skb->data); u8 cmd = CAPIMSG_COMMAND(skb->data); u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data); u16 dlen, retval; spin_lock_irqsave(&card->lock, flags); if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) { retval = capilib_data_b3_req(&cinfo->ncci_head, CAPIMSG_APPID(skb->data), CAPIMSG_NCCI(skb->data), CAPIMSG_MSGID(skb->data)); if (retval != CAPI_NOERROR) { spin_unlock_irqrestore(&card->lock, flags); return retval; } dlen = CAPIMSG_DATALEN(skb->data); b1_put_byte(port, SEND_DATA_B3_REQ); b1_put_slice(port, skb->data, len); b1_put_slice(port, skb->data + len, dlen); } else { b1_put_byte(port, SEND_MESSAGE); b1_put_slice(port, skb->data, len); } spin_unlock_irqrestore(&card->lock, flags); dev_kfree_skb_any(skb); return CAPI_NOERROR; } /* ------------------------------------------------------------- */ void b1_parse_version(avmctrl_info *cinfo) { struct capi_ctr *ctrl = &cinfo->capi_ctrl; avmcard *card = cinfo->card; capi_profile *profp; u8 *dversion; u8 flag; int i, j; for (j = 0; j < AVM_MAXVERSION; j++) cinfo->version[j] = "\0\0" + 1; for (i = 0, j = 0; j < AVM_MAXVERSION && i < cinfo->versionlen; j++, i += cinfo->versionbuf[i] + 1) cinfo->version[j] = &cinfo->versionbuf[i + 1]; strlcpy(ctrl->serial, cinfo->version[VER_SERIAL], sizeof(ctrl->serial)); memcpy(&ctrl->profile, cinfo->version[VER_PROFILE],sizeof(capi_profile)); strlcpy(ctrl->manu, "AVM GmbH", sizeof(ctrl->manu)); dversion = cinfo->version[VER_DRIVER]; ctrl->version.majorversion = 2; ctrl->version.minorversion = 0; ctrl->version.majormanuversion = (((dversion[0] - '0') & 0xf) << 4); ctrl->version.majormanuversion |= ((dversion[2] - '0') & 0xf); ctrl->version.minormanuversion = 
(dversion[3] - '0') << 4; ctrl->version.minormanuversion |= (dversion[5] - '0') * 10 + ((dversion[6] - '0') & 0xf); profp = &ctrl->profile; flag = ((u8 *)(profp->manu))[1]; switch (flag) { case 0: if (cinfo->version[VER_CARDTYPE]) strcpy(cinfo->cardname, cinfo->version[VER_CARDTYPE]); else strcpy(cinfo->cardname, "B1"); break; case 3: strcpy(cinfo->cardname,"PCMCIA B"); break; case 4: strcpy(cinfo->cardname,"PCMCIA M1"); break; case 5: strcpy(cinfo->cardname,"PCMCIA M2"); break; case 6: strcpy(cinfo->cardname,"B1 V3.0"); break; case 7: strcpy(cinfo->cardname,"B1 PCI"); break; default: sprintf(cinfo->cardname, "AVM?%u", (unsigned int)flag); break; } printk(KERN_NOTICE "%s: card %d \"%s\" ready.\n", card->name, ctrl->cnr, cinfo->cardname); flag = ((u8 *)(profp->manu))[3]; if (flag) printk(KERN_NOTICE "%s: card %d Protocol:%s%s%s%s%s%s%s\n", card->name, ctrl->cnr, (flag & 0x01) ? " DSS1" : "", (flag & 0x02) ? " CT1" : "", (flag & 0x04) ? " VN3" : "", (flag & 0x08) ? " NI1" : "", (flag & 0x10) ? " AUSTEL" : "", (flag & 0x20) ? " ESS" : "", (flag & 0x40) ? " 1TR6" : "" ); flag = ((u8 *)(profp->manu))[5]; if (flag) printk(KERN_NOTICE "%s: card %d Linetype:%s%s%s%s\n", card->name, ctrl->cnr, (flag & 0x01) ? " point to point" : "", (flag & 0x02) ? " point to multipoint" : "", (flag & 0x08) ? " leased line without D-channel" : "", (flag & 0x04) ? 
" leased line with D-channel" : "" ); } /* ------------------------------------------------------------- */ irqreturn_t b1_interrupt(int interrupt, void *devptr) { avmcard *card = devptr; avmctrl_info *cinfo = &card->ctrlinfo[0]; struct capi_ctr *ctrl = &cinfo->capi_ctrl; unsigned char b1cmd; struct sk_buff *skb; unsigned ApplId; unsigned MsgLen; unsigned DataB3Len; unsigned NCCI; unsigned WindowSize; unsigned long flags; spin_lock_irqsave(&card->lock, flags); if (!b1_rx_full(card->port)) { spin_unlock_irqrestore(&card->lock, flags); return IRQ_NONE; } b1cmd = b1_get_byte(card->port); switch (b1cmd) { case RECEIVE_DATA_B3_IND: ApplId = (unsigned) b1_get_word(card->port); MsgLen = b1_get_slice(card->port, card->msgbuf); DataB3Len = b1_get_slice(card->port, card->databuf); spin_unlock_irqrestore(&card->lock, flags); if (MsgLen < 30) { /* not CAPI 64Bit */ memset(card->msgbuf+MsgLen, 0, 30-MsgLen); MsgLen = 30; CAPIMSG_SETLEN(card->msgbuf, 30); } if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) { printk(KERN_ERR "%s: incoming packet dropped\n", card->name); } else { memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen); memcpy(skb_put(skb, DataB3Len), card->databuf, DataB3Len); capi_ctr_handle_message(ctrl, ApplId, skb); } break; case RECEIVE_MESSAGE: ApplId = (unsigned) b1_get_word(card->port); MsgLen = b1_get_slice(card->port, card->msgbuf); if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) { printk(KERN_ERR "%s: incoming packet dropped\n", card->name); spin_unlock_irqrestore(&card->lock, flags); } else { memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen); if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF) capilib_data_b3_conf(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data), CAPIMSG_MSGID(skb->data)); spin_unlock_irqrestore(&card->lock, flags); capi_ctr_handle_message(ctrl, ApplId, skb); } break; case RECEIVE_NEW_NCCI: ApplId = b1_get_word(card->port); NCCI = b1_get_word(card->port); WindowSize = b1_get_word(card->port); capilib_new_ncci(&cinfo->ncci_head, ApplId, 
NCCI, WindowSize); spin_unlock_irqrestore(&card->lock, flags); break; case RECEIVE_FREE_NCCI: ApplId = b1_get_word(card->port); NCCI = b1_get_word(card->port); if (NCCI != 0xffffffff) capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI); spin_unlock_irqrestore(&card->lock, flags); break; case RECEIVE_START: /* b1_put_byte(card->port, SEND_POLLACK); */ spin_unlock_irqrestore(&card->lock, flags); capi_ctr_resume_output(ctrl); break; case RECEIVE_STOP: spin_unlock_irqrestore(&card->lock, flags); capi_ctr_suspend_output(ctrl); break; case RECEIVE_INIT: cinfo->versionlen = b1_get_slice(card->port, cinfo->versionbuf); spin_unlock_irqrestore(&card->lock, flags); b1_parse_version(cinfo); printk(KERN_INFO "%s: %s-card (%s) now active\n", card->name, cinfo->version[VER_CARDTYPE], cinfo->version[VER_DRIVER]); capi_ctr_ready(ctrl); break; case RECEIVE_TASK_READY: ApplId = (unsigned) b1_get_word(card->port); MsgLen = b1_get_slice(card->port, card->msgbuf); spin_unlock_irqrestore(&card->lock, flags); card->msgbuf[MsgLen] = 0; while ( MsgLen > 0 && ( card->msgbuf[MsgLen-1] == '\n' || card->msgbuf[MsgLen-1] == '\r')) { card->msgbuf[MsgLen-1] = 0; MsgLen--; } printk(KERN_INFO "%s: task %d \"%s\" ready.\n", card->name, ApplId, card->msgbuf); break; case RECEIVE_DEBUGMSG: MsgLen = b1_get_slice(card->port, card->msgbuf); spin_unlock_irqrestore(&card->lock, flags); card->msgbuf[MsgLen] = 0; while ( MsgLen > 0 && ( card->msgbuf[MsgLen-1] == '\n' || card->msgbuf[MsgLen-1] == '\r')) { card->msgbuf[MsgLen-1] = 0; MsgLen--; } printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf); break; case 0xff: spin_unlock_irqrestore(&card->lock, flags); printk(KERN_ERR "%s: card removed ?\n", card->name); return IRQ_NONE; default: spin_unlock_irqrestore(&card->lock, flags); printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n", card->name, b1cmd); return IRQ_HANDLED; } return IRQ_HANDLED; } /* ------------------------------------------------------------- */ static int b1ctl_proc_show(struct seq_file *m, 
void *v) { struct capi_ctr *ctrl = m->private; avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); avmcard *card = cinfo->card; u8 flag; char *s; seq_printf(m, "%-16s %s\n", "name", card->name); seq_printf(m, "%-16s 0x%x\n", "io", card->port); seq_printf(m, "%-16s %d\n", "irq", card->irq); switch (card->cardtype) { case avm_b1isa: s = "B1 ISA"; break; case avm_b1pci: s = "B1 PCI"; break; case avm_b1pcmcia: s = "B1 PCMCIA"; break; case avm_m1: s = "M1"; break; case avm_m2: s = "M2"; break; case avm_t1isa: s = "T1 ISA (HEMA)"; break; case avm_t1pci: s = "T1 PCI"; break; case avm_c4: s = "C4"; break; case avm_c2: s = "C2"; break; default: s = "???"; break; } seq_printf(m, "%-16s %s\n", "type", s); if (card->cardtype == avm_t1isa) seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr); if ((s = cinfo->version[VER_DRIVER]) != NULL) seq_printf(m, "%-16s %s\n", "ver_driver", s); if ((s = cinfo->version[VER_CARDTYPE]) != NULL) seq_printf(m, "%-16s %s\n", "ver_cardtype", s); if ((s = cinfo->version[VER_SERIAL]) != NULL) seq_printf(m, "%-16s %s\n", "ver_serial", s); if (card->cardtype != avm_m1) { flag = ((u8 *)(ctrl->profile.manu))[3]; if (flag) seq_printf(m, "%-16s%s%s%s%s%s%s%s\n", "protocol", (flag & 0x01) ? " DSS1" : "", (flag & 0x02) ? " CT1" : "", (flag & 0x04) ? " VN3" : "", (flag & 0x08) ? " NI1" : "", (flag & 0x10) ? " AUSTEL" : "", (flag & 0x20) ? " ESS" : "", (flag & 0x40) ? " 1TR6" : "" ); } if (card->cardtype != avm_m1) { flag = ((u8 *)(ctrl->profile.manu))[5]; if (flag) seq_printf(m, "%-16s%s%s%s%s\n", "linetype", (flag & 0x01) ? " point to point" : "", (flag & 0x02) ? " point to multipoint" : "", (flag & 0x08) ? " leased line without D-channel" : "", (flag & 0x04) ? 
" leased line with D-channel" : "" ); } seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname); return 0; } static int b1ctl_proc_open(struct inode *inode, struct file *file) { return single_open(file, b1ctl_proc_show, PDE(inode)->data); } const struct file_operations b1ctl_proc_fops = { .owner = THIS_MODULE, .open = b1ctl_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; EXPORT_SYMBOL(b1ctl_proc_fops); /* ------------------------------------------------------------- */ #ifdef CONFIG_PCI avmcard_dmainfo * avmcard_dma_alloc(char *name, struct pci_dev *pdev, long rsize, long ssize) { avmcard_dmainfo *p; void *buf; p = kzalloc(sizeof(avmcard_dmainfo), GFP_KERNEL); if (!p) { printk(KERN_WARNING "%s: no memory.\n", name); goto err; } p->recvbuf.size = rsize; buf = pci_alloc_consistent(pdev, rsize, &p->recvbuf.dmaaddr); if (!buf) { printk(KERN_WARNING "%s: allocation of receive dma buffer failed.\n", name); goto err_kfree; } p->recvbuf.dmabuf = buf; p->sendbuf.size = ssize; buf = pci_alloc_consistent(pdev, ssize, &p->sendbuf.dmaaddr); if (!buf) { printk(KERN_WARNING "%s: allocation of send dma buffer failed.\n", name); goto err_free_consistent; } p->sendbuf.dmabuf = buf; skb_queue_head_init(&p->send_queue); return p; err_free_consistent: pci_free_consistent(p->pcidev, p->recvbuf.size, p->recvbuf.dmabuf, p->recvbuf.dmaaddr); err_kfree: kfree(p); err: return NULL; } void avmcard_dma_free(avmcard_dmainfo *p) { pci_free_consistent(p->pcidev, p->recvbuf.size, p->recvbuf.dmabuf, p->recvbuf.dmaaddr); pci_free_consistent(p->pcidev, p->sendbuf.size, p->sendbuf.dmabuf, p->sendbuf.dmaaddr); skb_queue_purge(&p->send_queue); kfree(p); } EXPORT_SYMBOL(avmcard_dma_alloc); EXPORT_SYMBOL(avmcard_dma_free); #endif EXPORT_SYMBOL(b1_irq_table); EXPORT_SYMBOL(b1_alloc_card); EXPORT_SYMBOL(b1_free_card); EXPORT_SYMBOL(b1_detect); EXPORT_SYMBOL(b1_getrevision); EXPORT_SYMBOL(b1_load_t4file); EXPORT_SYMBOL(b1_load_config); EXPORT_SYMBOL(b1_loaded); 
EXPORT_SYMBOL(b1_load_firmware); EXPORT_SYMBOL(b1_reset_ctr); EXPORT_SYMBOL(b1_register_appl); EXPORT_SYMBOL(b1_release_appl); EXPORT_SYMBOL(b1_send_message); EXPORT_SYMBOL(b1_parse_version); EXPORT_SYMBOL(b1_interrupt); static int __init b1_init(void) { char *p; char rev[32]; if ((p = strchr(revision, ':')) != NULL && p[1]) { strlcpy(rev, p + 2, 32); if ((p = strchr(rev, '$')) != NULL && p > rev) *(p-1) = 0; } else strcpy(rev, "1.0"); printk(KERN_INFO "b1: revision %s\n", rev); return 0; } static void __exit b1_exit(void) { } module_init(b1_init); module_exit(b1_exit);
gpl-2.0
nxnfufunezn/linux
sound/soc/samsung/ln2440sbc_alc650.c
4481
1702
/* * SoC audio for ln2440sbc * * Copyright 2007 KonekTel, a.s. * Author: Ivan Kuten * ivan.kuten@promwad.com * * Heavily based on smdk2443_wm9710.c * Copyright 2007 Wolfson Microelectronics PLC. * Author: Graeme Gregory * graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <sound/soc.h> static struct snd_soc_card ln2440sbc; static struct snd_soc_dai_link ln2440sbc_dai[] = { { .name = "AC97", .stream_name = "AC97 HiFi", .cpu_dai_name = "samsung-ac97", .codec_dai_name = "ac97-hifi", .codec_name = "ac97-codec", .platform_name = "samsung-ac97", }, }; static struct snd_soc_card ln2440sbc = { .name = "LN2440SBC", .owner = THIS_MODULE, .dai_link = ln2440sbc_dai, .num_links = ARRAY_SIZE(ln2440sbc_dai), }; static struct platform_device *ln2440sbc_snd_ac97_device; static int __init ln2440sbc_init(void) { int ret; ln2440sbc_snd_ac97_device = platform_device_alloc("soc-audio", -1); if (!ln2440sbc_snd_ac97_device) return -ENOMEM; platform_set_drvdata(ln2440sbc_snd_ac97_device, &ln2440sbc); ret = platform_device_add(ln2440sbc_snd_ac97_device); if (ret) platform_device_put(ln2440sbc_snd_ac97_device); return ret; } static void __exit ln2440sbc_exit(void) { platform_device_unregister(ln2440sbc_snd_ac97_device); } module_init(ln2440sbc_init); module_exit(ln2440sbc_exit); /* Module information */ MODULE_AUTHOR("Ivan Kuten"); MODULE_DESCRIPTION("ALSA SoC ALC650 LN2440SBC"); MODULE_LICENSE("GPL");
gpl-2.0
sohkis/android_kernel_lge_hammerhead
arch/arm/mach-orion5x/db88f5281-setup.c
4737
9639
/*
 * arch/arm/mach-orion5x/db88f5281-setup.c
 *
 * Marvell Orion-2 Development Board Setup
 *
 * Maintainer: Tzachi Perelstein <tzachi@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h>
#include <linux/timer.h>
#include <linux/mv643xx_eth.h>
#include <linux/i2c.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
#include <mach/orion5x.h>
#include <plat/orion_nand.h>
#include "common.h"
#include "mpp.h"

/*****************************************************************************
 * DB-88F5281 on board devices
 ****************************************************************************/

/*
 * 512K NOR flash Device bus boot chip select
 */
#define DB88F5281_NOR_BOOT_BASE		0xf4000000
#define DB88F5281_NOR_BOOT_SIZE		SZ_512K

/*
 * 7-Segment on Device bus chip select 0
 */
#define DB88F5281_7SEG_BASE		0xfa000000
#define DB88F5281_7SEG_SIZE		SZ_1K

/*
 * 32M NOR flash on Device bus chip select 1
 */
#define DB88F5281_NOR_BASE		0xfc000000
#define DB88F5281_NOR_SIZE		SZ_32M

/*
 * 32M NAND flash on Device bus chip select 2
 * (window size is SZ_1K: only the command/address/data latch needs mapping)
 */
#define DB88F5281_NAND_BASE		0xfa800000
#define DB88F5281_NAND_SIZE		SZ_1K

/*
 * PCI
 */
#define DB88F5281_PCI_SLOT0_OFFS		7
#define DB88F5281_PCI_SLOT0_IRQ_PIN		12
/* Slots 1 and 2 share a single GPIO interrupt line. */
#define DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN	13

/*****************************************************************************
 * 512K NOR Flash on Device bus Boot CS
 * (banner previously said "512M"; the region is DB88F5281_NOR_BOOT_SIZE = 512K)
 ****************************************************************************/
static struct physmap_flash_data db88f5281_boot_flash_data = {
	.width		= 1,	/* 8 bit bus width */
};

static struct resource db88f5281_boot_flash_resource = {
	.flags		= IORESOURCE_MEM,
	.start		= DB88F5281_NOR_BOOT_BASE,
	.end		= DB88F5281_NOR_BOOT_BASE + DB88F5281_NOR_BOOT_SIZE - 1,
};

static struct platform_device db88f5281_boot_flash = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data	= &db88f5281_boot_flash_data,
	},
	.num_resources	= 1,
	.resource	= &db88f5281_boot_flash_resource,
};

/*****************************************************************************
 * 32M NOR Flash on Device bus CS1
 ****************************************************************************/
static struct physmap_flash_data db88f5281_nor_flash_data = {
	.width		= 4,	/* 32 bit bus width */
};

static struct resource db88f5281_nor_flash_resource = {
	.flags		= IORESOURCE_MEM,
	.start		= DB88F5281_NOR_BASE,
	.end		= DB88F5281_NOR_BASE + DB88F5281_NOR_SIZE - 1,
};

static struct platform_device db88f5281_nor_flash = {
	.name		= "physmap-flash",
	.id		= 1,
	.dev		= {
		.platform_data	= &db88f5281_nor_flash_data,
	},
	.num_resources	= 1,
	.resource	= &db88f5281_nor_flash_resource,
};

/*****************************************************************************
 * 32M NAND Flash on Device bus CS2
 ****************************************************************************/
/* Fixed partition layout: 2M kernel, 14M root, 8M user, 8M recovery. */
static struct mtd_partition db88f5281_nand_parts[] = {
	{
		.name	= "kernel",
		.offset	= 0,
		.size	= SZ_2M,
	}, {
		.name	= "root",
		.offset	= SZ_2M,
		.size	= (SZ_16M - SZ_2M),
	}, {
		.name	= "user",
		.offset	= SZ_16M,
		.size	= SZ_8M,
	}, {
		.name	= "recovery",
		.offset	= (SZ_16M + SZ_8M),
		.size	= SZ_8M,
	},
};

static struct resource db88f5281_nand_resource = {
	.flags		= IORESOURCE_MEM,
	.start		= DB88F5281_NAND_BASE,
	.end		= DB88F5281_NAND_BASE + DB88F5281_NAND_SIZE - 1,
};

static struct orion_nand_data db88f5281_nand_data = {
	.parts		= db88f5281_nand_parts,
	.nr_parts	= ARRAY_SIZE(db88f5281_nand_parts),
	.cle		= 0,	/* address bit driving command latch enable */
	.ale		= 1,	/* address bit driving address latch enable */
	.width		= 8,
};

static struct platform_device db88f5281_nand_flash = {
	.name		= "orion_nand",
	.id		= -1,
	.dev		= {
		.platform_data	= &db88f5281_nand_data,
	},
	.resource	= &db88f5281_nand_resource,
	.num_resources	= 1,
};

/*****************************************************************************
 * 7-Segment on Device bus CS0
 * Dummy counter every 2 sec
 ****************************************************************************/

static void __iomem *db88f5281_7seg;
static struct timer_list db88f5281_timer;

/*
 * Periodic timer callback: cycle through 8 display values by writing to
 * successive addresses. NOTE(review): the display registers appear to be
 * decoded at 16-byte strides (count << 4) — confirm against the board's
 * address-decode scheme.
 */
static void db88f5281_7seg_event(unsigned long data)
{
	static int count = 0;
	writel(0, db88f5281_7seg + (count << 4));
	count = (count + 1) & 7;
	mod_timer(&db88f5281_timer, jiffies + 2 * HZ);
}

/* Map the 7-segment window and start the 2-second cycling timer. */
static int __init db88f5281_7seg_init(void)
{
	if (machine_is_db88f5281()) {
		db88f5281_7seg = ioremap(DB88F5281_7SEG_BASE,
					DB88F5281_7SEG_SIZE);
		if (!db88f5281_7seg) {
			printk(KERN_ERR "Failed to ioremap db88f5281_7seg\n");
			return -EIO;
		}
		setup_timer(&db88f5281_timer, db88f5281_7seg_event, 0);
		mod_timer(&db88f5281_timer, jiffies + 2 * HZ);
	}

	return 0;
}

__initcall(db88f5281_7seg_init);

/*****************************************************************************
 * PCI
 ****************************************************************************/

/*
 * Claim the two GPIO pins used as PCI interrupt inputs and configure them
 * as active-low level-triggered IRQ sources. On failure the pin is logged
 * and released (or left unclaimed).
 */
void __init db88f5281_pci_preinit(void)
{
	int pin;

	/*
	 * Configure PCI GPIO IRQ pins
	 */
	pin = DB88F5281_PCI_SLOT0_IRQ_PIN;
	if (gpio_request(pin, "PCI Int1") == 0) {
		if (gpio_direction_input(pin) == 0) {
			irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
		} else {
			printk(KERN_ERR "db88f5281_pci_preinit failed to "
					"set_irq_type pin %d\n", pin);
			gpio_free(pin);
		}
	} else {
		printk(KERN_ERR "db88f5281_pci_preinit failed to gpio_request %d\n", pin);
	}

	pin = DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN;
	if (gpio_request(pin, "PCI Int2") == 0) {
		if (gpio_direction_input(pin) == 0) {
			irq_set_irq_type(gpio_to_irq(pin), IRQ_TYPE_LEVEL_LOW);
		} else {
			printk(KERN_ERR "db88f5281_pci_preinit failed "
					"to set_irq_type pin %d\n", pin);
			gpio_free(pin);
		}
	} else {
		printk(KERN_ERR "db88f5281_pci_preinit failed to gpio_request %d\n", pin);
	}
}

/*
 * Route a PCI interrupt: hard-wired IRQs (handled by the Orion5x core
 * code) take precedence; otherwise map the slot to one of the two GPIO
 * interrupt lines. Unknown slots return -1 (no IRQ).
 */
static int __init db88f5281_pci_map_irq(const struct pci_dev *dev, u8 slot,
	u8 pin)
{
	int irq;

	/*
	 * Check for devices with hard-wired IRQs.
	 */
	irq = orion5x_pci_map_irq(dev, slot, pin);
	if (irq != -1)
		return irq;

	/*
	 * PCI IRQs are connected via GPIOs.
	 */
	switch (slot - DB88F5281_PCI_SLOT0_OFFS) {
	case 0:
		return gpio_to_irq(DB88F5281_PCI_SLOT0_IRQ_PIN);
	case 1: case 2:
		return gpio_to_irq(DB88F5281_PCI_SLOT1_SLOT2_IRQ_PIN);
	default:
		return -1;
	}
}

static struct hw_pci db88f5281_pci __initdata = {
	.nr_controllers	= 2,
	.preinit	= db88f5281_pci_preinit,
	.swizzle	= pci_std_swizzle,
	.setup		= orion5x_pci_sys_setup,
	.scan		= orion5x_pci_sys_scan_bus,
	.map_irq	= db88f5281_pci_map_irq,
};

static int __init db88f5281_pci_init(void)
{
	if (machine_is_db88f5281())
		pci_common_init(&db88f5281_pci);

	return 0;
}

subsys_initcall(db88f5281_pci_init);

/*****************************************************************************
 * Ethernet
 ****************************************************************************/
static struct mv643xx_eth_platform_data db88f5281_eth_data = {
	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
};

/*****************************************************************************
 * RTC DS1339 on I2C bus
 ****************************************************************************/
static struct i2c_board_info __initdata db88f5281_i2c_rtc = {
	I2C_BOARD_INFO("ds1339", 0x68),
};

/*****************************************************************************
 * General Setup
 ****************************************************************************/
/* Multi-purpose pin assignments for this board. */
static unsigned int db88f5281_mpp_modes[] __initdata = {
	MPP0_GPIO,		/* USB Over Current */
	MPP1_GPIO,		/* USB Vbat input */
	MPP2_PCI_ARB,		/* PCI_REQn[2] */
	MPP3_PCI_ARB,		/* PCI_GNTn[2] */
	MPP4_PCI_ARB,		/* PCI_REQn[3] */
	MPP5_PCI_ARB,		/* PCI_GNTn[3] */
	MPP6_GPIO,		/* JP0, CON17.2 */
	MPP7_GPIO,		/* JP1, CON17.1 */
	MPP8_GPIO,		/* JP2, CON11.2 */
	MPP9_GPIO,		/* JP3, CON11.3 */
	MPP10_GPIO,		/* RTC int */
	MPP11_GPIO,		/* Baud Rate Generator */
	MPP12_GPIO,		/* PCI int 1 */
	MPP13_GPIO,		/* PCI int 2 */
	MPP14_NAND,		/* NAND_REn[2] */
	MPP15_NAND,		/* NAND_WEn[2] */
	MPP16_UART,		/* UART1_RX */
	MPP17_UART,		/* UART1_TX */
	MPP18_UART,		/* UART1_CTSn */
	MPP19_UART,		/* UART1_RTSn */
	0,
};

/*
 * Board init: configure pins, bring up on-chip peripherals, program the
 * device-bus address windows and register the flash/NAND devices and the
 * RTC.
 */
static void __init db88f5281_init(void)
{
	/*
	 * Basic Orion setup. Need to be called early.
	 */
	orion5x_init();

	orion5x_mpp_conf(db88f5281_mpp_modes);
	writel(0, MPP_DEV_CTRL);		/* DEV_D[31:16] */

	/*
	 * Configure peripherals.
	 */
	orion5x_ehci0_init();
	orion5x_eth_init(&db88f5281_eth_data);
	orion5x_i2c_init();
	orion5x_uart0_init();
	orion5x_uart1_init();

	orion5x_setup_dev_boot_win(DB88F5281_NOR_BOOT_BASE,
				DB88F5281_NOR_BOOT_SIZE);
	platform_device_register(&db88f5281_boot_flash);

	orion5x_setup_dev0_win(DB88F5281_7SEG_BASE, DB88F5281_7SEG_SIZE);

	orion5x_setup_dev1_win(DB88F5281_NOR_BASE, DB88F5281_NOR_SIZE);
	platform_device_register(&db88f5281_nor_flash);

	orion5x_setup_dev2_win(DB88F5281_NAND_BASE, DB88F5281_NAND_SIZE);
	platform_device_register(&db88f5281_nand_flash);

	i2c_register_board_info(0, &db88f5281_i2c_rtc, 1);
}

MACHINE_START(DB88F5281, "Marvell Orion-2 Development Board")
	/* Maintainer: Tzachi Perelstein <tzachi@marvell.com> */
	.atag_offset	= 0x100,
	.init_machine	= db88f5281_init,
	.map_io		= orion5x_map_io,
	.init_early	= orion5x_init_early,
	.init_irq	= orion5x_init_irq,
	.timer		= &orion5x_timer,
	.restart	= orion5x_restart,
MACHINE_END
gpl-2.0
shahan-mik3/android_kernel_xiaomi_cancro
drivers/watchdog/intel_scu_watchdog.c
7297
15505
/*
 * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
 * for Intel part #(s):
 * - AF82MP20 PCH
 *
 * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE.  See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA  02111-1307, USA.
 * The full GNU General Public License is included in this
 * distribution in the file called COPYING.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sfi.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/intel_scu_ipc.h>
#include <asm/apb_timer.h>
#include <asm/mrst.h>

#include "intel_scu_watchdog.h"

/* Bounds number of times we will retry loading time count */
/* This retry is a work around for a silicon bug. */
#define MAX_RETRY 16

/* SCU IPC command used to program the watchdog thresholds. */
#define IPC_SET_WATCHDOG_TIMER	0xF8

static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
module_param(timer_margin, int, 0);
MODULE_PARM_DESC(timer_margin,
		"Watchdog timer margin"
		"Time between interrupt and resetting the system"
		"The range is from 1 to 160"
		"This is the time for all keep alives to arrive");

static int timer_set = DEFAULT_TIME;
module_param(timer_set, int, 0);
MODULE_PARM_DESC(timer_set,
		"Default Watchdog timer setting"
		"Complete cycle time"
		"The range is from 1 to 170"
		"This is the time for all keep alives to arrive");

/* After watchdog device is closed, check force_boot. If:
 * force_boot == 0, then force boot on next watchdog interrupt after close,
 * force_boot == 1, then force boot immediately when device is closed.
 */
static int force_boot;
module_param(force_boot, int, 0);
MODULE_PARM_DESC(force_boot,
		"A value of 1 means that the driver will reboot"
		"the system immediately if the /dev/watchdog device is closed"
		"A value of 0 means that when /dev/watchdog device is closed"
		"the watchdog timer will be refreshed for one more interval"
		"of length: timer_set. At the end of this interval, the"
		"watchdog timer will reset the system."
		);

/* there is only one device in the system now; this can be made into
 * an array in the future if we have more than one device */

static struct intel_scu_watchdog_dev watchdog_device;

/* Forces restart, if force_reboot is set */
static void watchdog_fire(void)
{
	if (force_boot) {
		pr_crit("Initiating system reboot\n");
		emergency_restart();
		pr_crit("Reboot didn't ?????\n");
	}

	else {
		pr_crit("Immediate Reboot Disabled\n");
		pr_crit("System will reset when watchdog timer times out!\n");
	}
}

/* Validate a margin value against MIN_TIME_CYCLE and the room left by
 * the current timer_set. Returns 0 if in range, -EINVAL otherwise. */
static int check_timer_margin(int new_margin)
{
	if ((new_margin < MIN_TIME_CYCLE) ||
	    (new_margin > MAX_TIME - timer_set)) {
		pr_debug("value of new_margin %d is out of the range %d to %d\n",
			 new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
		return -EINVAL;
	}
	return 0;
}

/*
 * IPC operations
 */

/* Send the two threshold words (soft_threshold first) to the SCU via the
 * IPC_SET_WATCHDOG_TIMER command. Returns the IPC result (0 on success). */
static int watchdog_set_ipc(int soft_threshold, int threshold)
{
	u32	*ipc_wbuf;
	u8	 cbuf[16] = { '\0' };
	int	 ipc_ret = 0;

	ipc_wbuf = (u32 *)&cbuf;
	ipc_wbuf[0] = soft_threshold;
	ipc_wbuf[1] = threshold;

	ipc_ret = intel_scu_ipc_command(
			IPC_SET_WATCHDOG_TIMER,
			0,
			ipc_wbuf,
			2,
			NULL,
			0);

	if (ipc_ret != 0)
		pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);

	return ipc_ret;
};

/*
 *      Intel_SCU operations
 */

/* timer interrupt handler */
static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
{
	int int_status;
	int_status = ioread32(watchdog_device.timer_interrupt_status_addr);

	pr_debug("irq, int_status: %x\n", int_status);

	/* NOTE(review): a non-zero status is treated as "not our interrupt"
	 * (IRQ is shared) — presumably this status register reads 0 while
	 * our interrupt is pending; confirm against the APB timer spec. */
	if (int_status != 0)
		return IRQ_NONE;

	/* has the timer been started? If not, then this is spurious */
	if (watchdog_device.timer_started == 0) {
		pr_debug("spurious interrupt received\n");
		return IRQ_HANDLED;
	}

	/* temporarily disable the timer */
	iowrite32(0x00000002, watchdog_device.timer_control_addr);

	/* set the timer to the threshold */
	iowrite32(watchdog_device.threshold,
		  watchdog_device.timer_load_count_addr);

	/* allow the timer to run */
	iowrite32(0x00000003, watchdog_device.timer_control_addr);

	return IRQ_HANDLED;
}

/* Pet the watchdog: clear the pending interrupt and reload the counter
 * with the (longer) soft_threshold interval. */
static int intel_scu_keepalive(void)
{

	/* read eoi register - clears interrupt */
	ioread32(watchdog_device.timer_clear_interrupt_addr);

	/* temporarily disable the timer */
	iowrite32(0x00000002, watchdog_device.timer_control_addr);

	/* set the timer to the soft_threshold */
	iowrite32(watchdog_device.soft_threshold,
		  watchdog_device.timer_load_count_addr);

	/* allow the timer to run */
	iowrite32(0x00000003, watchdog_device.timer_control_addr);

	return 0;
}

/* Stop the hardware timer entirely. */
static int intel_scu_stop(void)
{
	iowrite32(0, watchdog_device.timer_control_addr);
	return 0;
}

/* Program a new watchdog cycle of t units (converted to timer ticks via
 * freq_hz and FREQ_ADJUSTMENT — t is presumably seconds; confirm against
 * intel_scu_watchdog.h). Includes the silicon-bug retry loop that
 * re-writes the load count until the hardware latches the upper 16 bits. */
static int intel_scu_set_heartbeat(u32 t)
{
	int			 ipc_ret;
	int			 retry_count;
	u32			 soft_value;
	u32			 hw_pre_value;
	u32			 hw_value;

	watchdog_device.timer_set = t;
	watchdog_device.threshold =
		timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
	watchdog_device.soft_threshold =
		(watchdog_device.timer_set - timer_margin)
		* watchdog_device.timer_tbl_ptr->freq_hz;

	pr_debug("set_heartbeat: timer freq is %d\n",
		 watchdog_device.timer_tbl_ptr->freq_hz);
	pr_debug("set_heartbeat: timer_set is %x (hex)\n",
		 watchdog_device.timer_set);
	pr_debug("set_hearbeat: timer_margin is %x (hex)\n", timer_margin);
	pr_debug("set_heartbeat: threshold is %x (hex)\n",
		 watchdog_device.threshold);
	pr_debug("set_heartbeat: soft_threshold is %x (hex)\n",
		 watchdog_device.soft_threshold);

	/* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
	/* watchdog timing come out right. */
	watchdog_device.threshold =
		watchdog_device.threshold / FREQ_ADJUSTMENT;
	watchdog_device.soft_threshold =
		watchdog_device.soft_threshold / FREQ_ADJUSTMENT;

	/* temporarily disable the timer */
	iowrite32(0x00000002, watchdog_device.timer_control_addr);

	/* send the threshold and soft_threshold via IPC to the processor */
	ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
				   watchdog_device.threshold);

	if (ipc_ret != 0) {
		/* Make sure the watchdog timer is stopped */
		intel_scu_stop();
		return ipc_ret;
	}

	/* Soft Threshold set loop. Early versions of silicon did */
	/* not always set this count correctly.  This loop checks */
	/* the value and retries if it was not set correctly.     */

	retry_count = 0;
	soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
	do {

		/* Make sure timer is stopped */
		intel_scu_stop();

		if (MAX_RETRY < retry_count++) {
			/* Unable to set timer value */
			pr_err("Unable to set timer\n");
			return -ENODEV;
		}

		/* set the timer to the soft threshold */
		iowrite32(watchdog_device.soft_threshold,
			watchdog_device.timer_load_count_addr);

		/* read count value before starting timer */
		hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
		hw_pre_value = hw_pre_value & 0xFFFF0000;

		/* Start the timer */
		iowrite32(0x00000003, watchdog_device.timer_control_addr);

		/* read the value the time loaded into its count reg */
		hw_value = ioread32(watchdog_device.timer_load_count_addr);
		hw_value = hw_value & 0xFFFF0000;


	} while (soft_value != hw_value);

	watchdog_device.timer_started = 1;

	return 0;
}

/*
 * /dev/watchdog handling
 */

/* Open is exclusive (driver_open bit) and one-shot: once the device has
 * been closed after the timer started, reopening is refused (-EPERM). */
static int intel_scu_open(struct inode *inode, struct file *file)
{

	/* Set flag to indicate that watchdog device is open */
	if (test_and_set_bit(0, &watchdog_device.driver_open))
		return -EBUSY;

	/* Check for reopen of driver. Reopens are not allowed */
	if (watchdog_device.driver_closed)
		return -EPERM;

	return nonseekable_open(inode, file);
}

static int intel_scu_release(struct inode *inode, struct file *file)
{
	/*
	 * This watchdog should not be closed, after the timer
	 * is started with the WDIPC_SETTIMEOUT ioctl
	 * If force_boot is set watchdog_fire() will cause an
	 * immediate reset. If force_boot is not set, the watchdog
	 * timer is refreshed for one more interval. At the end
	 * of that interval, the watchdog timer will reset the system.
	 */

	if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
		pr_debug("intel_scu_release, without open\n");
		return -ENOTTY;
	}

	if (!watchdog_device.timer_started) {
		/* Just close, since timer has not been started */
		pr_debug("closed, without starting timer\n");
		return 0;
	}

	pr_crit("Unexpected close of /dev/watchdog!\n");

	/* Since the timer was started, prevent future reopens */
	watchdog_device.driver_closed = 1;

	/* Refresh the timer for one more interval */
	intel_scu_keepalive();

	/* Reboot system (if force_boot is set) */
	watchdog_fire();

	/* We should only reach this point if force_boot is not set */
	return 0;
}

/* Any write counts as a keepalive; the first write starts the timer. */
static ssize_t intel_scu_write(struct file *file,
			      char const *data,
			      size_t len,
			      loff_t *ppos)
{

	if (watchdog_device.timer_started)
		/* Watchdog already started, keep it alive */
		intel_scu_keepalive();
	else
		/* Start watchdog with timer value set by init */
		intel_scu_set_heartbeat(watchdog_device.timer_set);

	return len;
}

static long intel_scu_ioctl(struct file *file,
			   unsigned int cmd,
			   unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	u32 __user *p = argp;
	u32 new_margin;


	static const struct watchdog_info ident = {
		.options =          WDIOF_SETTIMEOUT
				    | WDIOF_KEEPALIVEPING,
		.firmware_version = 0,  /* @todo Get from SCU via
						 ipc_get_scu_fw_version()? */
		.identity =         "Intel_SCU IOH Watchdog"  /* len < 32 */
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp,
				    &ident,
				    sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_KEEPALIVE:
		intel_scu_keepalive();

		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_margin, p))
			return -EFAULT;

		if (check_timer_margin(new_margin))
			return -EINVAL;

		if (intel_scu_set_heartbeat(new_margin))
			return -EINVAL;
		return 0;
	case WDIOC_GETTIMEOUT:
		/* NOTE(review): soft_threshold was scaled by freq_hz and
		 * FREQ_ADJUSTMENT above, so this returns timer ticks, not
		 * the seconds value passed to WDIOC_SETTIMEOUT — looks
		 * inconsistent; confirm intended units. */
		return put_user(watchdog_device.soft_threshold, p);

	default:
		return -ENOTTY;
	}
}

/*
 *      Notifier for system down
 */
static int intel_scu_notify_sys(struct notifier_block *this,
			       unsigned long code,
			       void *another_unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		/* Turn off the watchdog timer. */
		intel_scu_stop();

	return NOTIFY_DONE;
}

/*
 *      Kernel Interfaces
 */
static const struct file_operations intel_scu_fops = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.write          = intel_scu_write,
	.unlocked_ioctl = intel_scu_ioctl,
	.open           = intel_scu_open,
	.release        = intel_scu_release,
};

/* Probe the SFI-described APB timer, map its five 32-bit registers,
 * compute default thresholds, and register reboot notifier, misc device
 * and (shared) IRQ — unwinding each on failure via gotos. */
static int __init intel_scu_watchdog_init(void)
{
	int ret;
	u32 __iomem *tmp_addr;

	/*
	 * We don't really need to check this as the SFI timer get will fail
	 * but if we do so we can exit with a clearer reason and no noise.
	 *
	 * If it isn't an intel MID device then it doesn't have this watchdog
	 */
	if (!mrst_identify_cpu())
		return -ENODEV;

	/* Check boot parameters to verify that their initial values */
	/* are in range. */
	/* Check value of timer_set boot parameter */
	if ((timer_set < MIN_TIME_CYCLE) ||
	    (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
		pr_err("value of timer_set %x (hex) is out of range from %x to %x (hex)\n",
		       timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
		return -EINVAL;
	}

	/* Check value of timer_margin boot parameter */
	if (check_timer_margin(timer_margin))
		return -EINVAL;

	watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);

	if (watchdog_device.timer_tbl_ptr == NULL) {
		pr_debug("timer is not available\n");
		return -ENODEV;
	}
	/* make sure the timer exists */
	if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
		pr_debug("timer %d does not have valid physical memory\n",
			 sfi_mtimer_num);
		return -ENODEV;
	}

	if (watchdog_device.timer_tbl_ptr->irq == 0) {
		pr_debug("timer %d invalid irq\n", sfi_mtimer_num);
		return -ENODEV;
	}

	tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
			20);

	if (tmp_addr == NULL) {
		pr_debug("timer unable to ioremap\n");
		return -ENOMEM;
	}

	/* The five consecutive u32 registers of the APB timer. */
	watchdog_device.timer_load_count_addr = tmp_addr++;
	watchdog_device.timer_current_value_addr = tmp_addr++;
	watchdog_device.timer_control_addr = tmp_addr++;
	watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
	watchdog_device.timer_interrupt_status_addr = tmp_addr++;

	/* Set the default time values in device structure */

	watchdog_device.timer_set = timer_set;
	watchdog_device.threshold =
		timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
	watchdog_device.soft_threshold =
		(watchdog_device.timer_set - timer_margin)
		* watchdog_device.timer_tbl_ptr->freq_hz;


	watchdog_device.intel_scu_notifier.notifier_call =
		intel_scu_notify_sys;

	ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
	if (ret) {
		pr_err("cannot register notifier %d)\n", ret);
		goto register_reboot_error;
	}

	watchdog_device.miscdev.minor = WATCHDOG_MINOR;
	watchdog_device.miscdev.name = "watchdog";
	watchdog_device.miscdev.fops = &intel_scu_fops;

	ret = misc_register(&watchdog_device.miscdev);
	if (ret) {
		pr_err("cannot register miscdev %d err =%d\n",
		       WATCHDOG_MINOR, ret);
		goto misc_register_error;
	}

	ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
			  watchdog_timer_interrupt,
			  IRQF_SHARED, "watchdog",
			  &watchdog_device.timer_load_count_addr);
	if (ret) {
		pr_err("error requesting irq %d\n", ret);
		goto request_irq_error;
	}
	/* Make sure timer is disabled before returning */
	intel_scu_stop();
	return 0;

/* error cleanup */

request_irq_error:
	misc_deregister(&watchdog_device.miscdev);

misc_register_error:
	unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);

register_reboot_error:
	intel_scu_stop();
	iounmap(watchdog_device.timer_load_count_addr);
	return ret;
}

static void __exit intel_scu_watchdog_exit(void)
{

	misc_deregister(&watchdog_device.miscdev);
	unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
	/* disable the timer */
	iowrite32(0x00000002, watchdog_device.timer_control_addr);
	iounmap(watchdog_device.timer_load_count_addr);
}

late_initcall(intel_scu_watchdog_init);
module_exit(intel_scu_watchdog_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_VERSION(WDT_VER);
gpl-2.0
TeamRegular/android_kernel_madcatz_mojo
arch/mips/mm/uasm.c
7553
18097
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * A small micro-assembler. It is intentionally kept simple, does only * support a subset of instructions, and does not try to hide pipeline * effects like branch delay slots. * * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005, 2007 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <asm/inst.h> #include <asm/elf.h> #include <asm/bugs.h> #include <asm/uasm.h> enum fields { RS = 0x001, RT = 0x002, RD = 0x004, RE = 0x008, SIMM = 0x010, UIMM = 0x020, BIMM = 0x040, JIMM = 0x080, FUNC = 0x100, SET = 0x200, SCIMM = 0x400 }; #define OP_MASK 0x3f #define OP_SH 26 #define RS_MASK 0x1f #define RS_SH 21 #define RT_MASK 0x1f #define RT_SH 16 #define RD_MASK 0x1f #define RD_SH 11 #define RE_MASK 0x1f #define RE_SH 6 #define IMM_MASK 0xffff #define IMM_SH 0 #define JIMM_MASK 0x3ffffff #define JIMM_SH 0 #define FUNC_MASK 0x3f #define FUNC_SH 0 #define SET_MASK 0x7 #define SET_SH 0 #define SCIMM_MASK 0xfffff #define SCIMM_SH 6 enum opcode { insn_invalid, insn_addu, insn_addiu, insn_and, insn_andi, insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_drotr, insn_drotr32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, insn_dins, insn_dinsm, insn_syscall, insn_bbit0, insn_bbit1, insn_lwx, insn_ldx }; struct insn { enum opcode opcode; u32 match; enum fields fields; }; /* 
This macro sets the non-variable bits of an instruction. */ #define M(a, b, c, d, e, f) \ ((a) << OP_SH \ | (b) << RS_SH \ | (c) << RT_SH \ | (d) << RD_SH \ | (e) << RE_SH \ | (f) << FUNC_SH) static struct insn insn_table[] __uasminitdata = { { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE }, { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ll, M(ll_op, 0, 0, 0, 0, 
0), RS | RT | SIMM }, { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, { insn_invalid, 0, 0 } }; #undef M static inline __uasminit u32 build_rs(u32 arg) { WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 
return (arg & RS_MASK) << RS_SH; } static inline __uasminit u32 build_rt(u32 arg) { WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RT_MASK) << RT_SH; } static inline __uasminit u32 build_rd(u32 arg) { WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RD_MASK) << RD_SH; } static inline __uasminit u32 build_re(u32 arg) { WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RE_MASK) << RE_SH; } static inline __uasminit u32 build_simm(s32 arg) { WARN(arg > 0x7fff || arg < -0x8000, KERN_WARNING "Micro-assembler field overflow\n"); return arg & 0xffff; } static inline __uasminit u32 build_uimm(u32 arg) { WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return arg & IMM_MASK; } static inline __uasminit u32 build_bimm(s32 arg) { WARN(arg > 0x1ffff || arg < -0x20000, KERN_WARNING "Micro-assembler field overflow\n"); WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n"); return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); } static inline __uasminit u32 build_jimm(u32 arg) { WARN(arg & ~(JIMM_MASK << 2), KERN_WARNING "Micro-assembler field overflow\n"); return (arg >> 2) & JIMM_MASK; } static inline __uasminit u32 build_scimm(u32 arg) { WARN(arg & ~SCIMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & SCIMM_MASK) << SCIMM_SH; } static inline __uasminit u32 build_func(u32 arg) { WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return arg & FUNC_MASK; } static inline __uasminit u32 build_set(u32 arg) { WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return arg & SET_MASK; } /* * The order of opcode arguments is implicitly left to right, * starting with RS and ending with FUNC or IMM. */ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) 
{ struct insn *ip = NULL; unsigned int i; va_list ap; u32 op; for (i = 0; insn_table[i].opcode != insn_invalid; i++) if (insn_table[i].opcode == opc) { ip = &insn_table[i]; break; } if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) panic("Unsupported Micro-assembler instruction %d", opc); op = ip->match; va_start(ap, opc); if (ip->fields & RS) op |= build_rs(va_arg(ap, u32)); if (ip->fields & RT) op |= build_rt(va_arg(ap, u32)); if (ip->fields & RD) op |= build_rd(va_arg(ap, u32)); if (ip->fields & RE) op |= build_re(va_arg(ap, u32)); if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32)); if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32)); if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32)); if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32)); if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32)); if (ip->fields & SET) op |= build_set(va_arg(ap, u32)); if (ip->fields & SCIMM) op |= build_scimm(va_arg(ap, u32)); va_end(ap); **buf = op; (*buf)++; } #define I_u1u2u3(op) \ Ip_u1u2u3(op) \ { \ build_insn(buf, insn##op, a, b, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1u3(op) \ Ip_u2u1u3(op) \ { \ build_insn(buf, insn##op, b, a, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u3u1u2(op) \ Ip_u3u1u2(op) \ { \ build_insn(buf, insn##op, b, c, a); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1u2s3(op) \ Ip_u1u2s3(op) \ { \ build_insn(buf, insn##op, a, b, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2s3u1(op) \ Ip_u2s3u1(op) \ { \ build_insn(buf, insn##op, c, a, b); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1s3(op) \ Ip_u2u1s3(op) \ { \ build_insn(buf, insn##op, b, a, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1msbu3(op) \ Ip_u2u1msbu3(op) \ { \ build_insn(buf, insn##op, b, a, c+d-1, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1msb32u3(op) \ Ip_u2u1msbu3(op) \ { \ build_insn(buf, insn##op, b, a, c+d-33, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1u2(op) \ Ip_u1u2(op) \ { \ 
build_insn(buf, insn##op, a, b); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1s2(op) \ Ip_u1s2(op) \ { \ build_insn(buf, insn##op, a, b); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1(op) \ Ip_u1(op) \ { \ build_insn(buf, insn##op, a); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_0(op) \ Ip_0(op) \ { \ build_insn(buf, insn##op); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); I_u2u1s3(_addiu) I_u3u1u2(_addu) I_u2u1u3(_andi) I_u3u1u2(_and) I_u1u2s3(_beq) I_u1u2s3(_beql) I_u1s2(_bgez) I_u1s2(_bgezl) I_u1s2(_bltz) I_u1s2(_bltzl) I_u1u2s3(_bne) I_u2s3u1(_cache) I_u1u2u3(_dmfc0) I_u1u2u3(_dmtc0) I_u2u1s3(_daddiu) I_u3u1u2(_daddu) I_u2u1u3(_dsll) I_u2u1u3(_dsll32) I_u2u1u3(_dsra) I_u2u1u3(_dsrl) I_u2u1u3(_dsrl32) I_u2u1u3(_drotr) I_u2u1u3(_drotr32) I_u3u1u2(_dsubu) I_0(_eret) I_u1(_j) I_u1(_jal) I_u1(_jr) I_u2s3u1(_ld) I_u2s3u1(_ll) I_u2s3u1(_lld) I_u1s2(_lui) I_u2s3u1(_lw) I_u1u2u3(_mfc0) I_u1u2u3(_mtc0) I_u2u1u3(_ori) I_u3u1u2(_or) I_0(_rfe) I_u2s3u1(_sc) I_u2s3u1(_scd) I_u2s3u1(_sd) I_u2u1u3(_sll) I_u2u1u3(_sra) I_u2u1u3(_srl) I_u2u1u3(_rotr) I_u3u1u2(_subu) I_u2s3u1(_sw) I_0(_tlbp) I_0(_tlbr) I_0(_tlbwi) I_0(_tlbwr) I_u3u1u2(_xor) I_u2u1u3(_xori) I_u2u1msbu3(_dins); I_u2u1msb32u3(_dinsm); I_u1(_syscall); I_u1u2s3(_bbit0); I_u1u2s3(_bbit1); I_u3u1u2(_lwx) I_u3u1u2(_ldx) #ifdef CONFIG_CPU_CAVIUM_OCTEON #include <asm/octeon/octeon.h> void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b, unsigned int c) { if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) /* * As per erratum Core-14449, replace prefetches 0-4, * 6-24 with 'pref 28'. */ build_insn(buf, insn_pref, c, 28, b); else build_insn(buf, insn_pref, c, a, b); } UASM_EXPORT_SYMBOL(uasm_i_pref); #else I_u2s3u1(_pref) #endif /* Handle labels. 
*/ void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) { (*lab)->addr = addr; (*lab)->lab = lid; (*lab)++; } UASM_EXPORT_SYMBOL(uasm_build_label); int __uasminit uasm_in_compat_space_p(long addr) { /* Is this address in 32bit compat space? */ #ifdef CONFIG_64BIT return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); #else return 1; #endif } UASM_EXPORT_SYMBOL(uasm_in_compat_space_p); static int __uasminit uasm_rel_highest(long val) { #ifdef CONFIG_64BIT return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; #else return 0; #endif } static int __uasminit uasm_rel_higher(long val) { #ifdef CONFIG_64BIT return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; #else return 0; #endif } int __uasminit uasm_rel_hi(long val) { return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; } UASM_EXPORT_SYMBOL(uasm_rel_hi); int __uasminit uasm_rel_lo(long val) { return ((val & 0xffff) ^ 0x8000) - 0x8000; } UASM_EXPORT_SYMBOL(uasm_rel_lo); void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr) { if (!uasm_in_compat_space_p(addr)) { uasm_i_lui(buf, rs, uasm_rel_highest(addr)); if (uasm_rel_higher(addr)) uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr)); if (uasm_rel_hi(addr)) { uasm_i_dsll(buf, rs, rs, 16); uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr)); uasm_i_dsll(buf, rs, rs, 16); } else uasm_i_dsll32(buf, rs, rs, 0); } else uasm_i_lui(buf, rs, uasm_rel_hi(addr)); } UASM_EXPORT_SYMBOL(UASM_i_LA_mostly); void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr) { UASM_i_LA_mostly(buf, rs, addr); if (uasm_rel_lo(addr)) { if (!uasm_in_compat_space_p(addr)) uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr)); else uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr)); } } UASM_EXPORT_SYMBOL(UASM_i_LA); /* Handle relocations. 
*/ void __uasminit uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid) { (*rel)->addr = addr; (*rel)->type = R_MIPS_PC16; (*rel)->lab = lid; (*rel)++; } UASM_EXPORT_SYMBOL(uasm_r_mips_pc16); static inline void __uasminit __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) { long laddr = (long)lab->addr; long raddr = (long)rel->addr; switch (rel->type) { case R_MIPS_PC16: *rel->addr |= build_bimm(laddr - (raddr + 4)); break; default: panic("Unsupported Micro-assembler relocation %d", rel->type); } } void __uasminit uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) { struct uasm_label *l; for (; rel->lab != UASM_LABEL_INVALID; rel++) for (l = lab; l->lab != UASM_LABEL_INVALID; l++) if (rel->lab == l->lab) __resolve_relocs(rel, l); } UASM_EXPORT_SYMBOL(uasm_resolve_relocs); void __uasminit uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off) { for (; rel->lab != UASM_LABEL_INVALID; rel++) if (rel->addr >= first && rel->addr < end) rel->addr += off; } UASM_EXPORT_SYMBOL(uasm_move_relocs); void __uasminit uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off) { for (; lab->lab != UASM_LABEL_INVALID; lab++) if (lab->addr >= first && lab->addr < end) lab->addr += off; } UASM_EXPORT_SYMBOL(uasm_move_labels); void __uasminit uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, u32 *end, u32 *target) { long off = (long)(target - first); memcpy(target, first, (end - first) * sizeof(u32)); uasm_move_relocs(rel, first, end, off); uasm_move_labels(lab, first, end, off); } UASM_EXPORT_SYMBOL(uasm_copy_handler); int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) { for (; rel->lab != UASM_LABEL_INVALID; rel++) { if (rel->addr == addr && (rel->type == R_MIPS_PC16 || rel->type == R_MIPS_26)) return 1; } return 0; } UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay); /* Convenience functions for labeled branches. 
*/ void __uasminit uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bltz(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_bltz); void __uasminit uasm_il_b(u32 **p, struct uasm_reloc **r, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_b(p, 0); } UASM_EXPORT_SYMBOL(uasm_il_b); void __uasminit uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_beqz(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_beqz); void __uasminit uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_beqzl(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_beqzl); void __uasminit uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, unsigned int reg2, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bne(p, reg1, reg2, 0); } UASM_EXPORT_SYMBOL(uasm_il_bne); void __uasminit uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bnez(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_bnez); void __uasminit uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bgezl(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_bgezl); void __uasminit uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bgez(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_bgez); void __uasminit uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bbit0(p, reg, bit, 0); } UASM_EXPORT_SYMBOL(uasm_il_bbit0); void __uasminit uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bbit1(p, reg, bit, 0); } UASM_EXPORT_SYMBOL(uasm_il_bbit1);
gpl-2.0
omnirom/android_kernel_lge_gee
arch/mips/mm/uasm.c
7553
18097
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * A small micro-assembler. It is intentionally kept simple, does only * support a subset of instructions, and does not try to hide pipeline * effects like branch delay slots. * * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005, 2007 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <asm/inst.h> #include <asm/elf.h> #include <asm/bugs.h> #include <asm/uasm.h> enum fields { RS = 0x001, RT = 0x002, RD = 0x004, RE = 0x008, SIMM = 0x010, UIMM = 0x020, BIMM = 0x040, JIMM = 0x080, FUNC = 0x100, SET = 0x200, SCIMM = 0x400 }; #define OP_MASK 0x3f #define OP_SH 26 #define RS_MASK 0x1f #define RS_SH 21 #define RT_MASK 0x1f #define RT_SH 16 #define RD_MASK 0x1f #define RD_SH 11 #define RE_MASK 0x1f #define RE_SH 6 #define IMM_MASK 0xffff #define IMM_SH 0 #define JIMM_MASK 0x3ffffff #define JIMM_SH 0 #define FUNC_MASK 0x3f #define FUNC_SH 0 #define SET_MASK 0x7 #define SET_SH 0 #define SCIMM_MASK 0xfffff #define SCIMM_SH 6 enum opcode { insn_invalid, insn_addu, insn_addiu, insn_and, insn_andi, insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl, insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_drotr, insn_drotr32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori, insn_dins, insn_dinsm, insn_syscall, insn_bbit0, insn_bbit1, insn_lwx, insn_ldx }; struct insn { enum opcode opcode; u32 match; enum fields fields; }; /* 
This macro sets the non-variable bits of an instruction. */ #define M(a, b, c, d, e, f) \ ((a) << OP_SH \ | (b) << RS_SH \ | (c) << RT_SH \ | (d) << RD_SH \ | (e) << RE_SH \ | (f) << FUNC_SH) static struct insn insn_table[] __uasminitdata = { { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE }, { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ll, M(ll_op, 0, 0, 0, 0, 
0), RS | RT | SIMM }, { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, { insn_invalid, 0, 0 } }; #undef M static inline __uasminit u32 build_rs(u32 arg) { WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 
return (arg & RS_MASK) << RS_SH; } static inline __uasminit u32 build_rt(u32 arg) { WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RT_MASK) << RT_SH; } static inline __uasminit u32 build_rd(u32 arg) { WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RD_MASK) << RD_SH; } static inline __uasminit u32 build_re(u32 arg) { WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & RE_MASK) << RE_SH; } static inline __uasminit u32 build_simm(s32 arg) { WARN(arg > 0x7fff || arg < -0x8000, KERN_WARNING "Micro-assembler field overflow\n"); return arg & 0xffff; } static inline __uasminit u32 build_uimm(u32 arg) { WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return arg & IMM_MASK; } static inline __uasminit u32 build_bimm(s32 arg) { WARN(arg > 0x1ffff || arg < -0x20000, KERN_WARNING "Micro-assembler field overflow\n"); WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n"); return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); } static inline __uasminit u32 build_jimm(u32 arg) { WARN(arg & ~(JIMM_MASK << 2), KERN_WARNING "Micro-assembler field overflow\n"); return (arg >> 2) & JIMM_MASK; } static inline __uasminit u32 build_scimm(u32 arg) { WARN(arg & ~SCIMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return (arg & SCIMM_MASK) << SCIMM_SH; } static inline __uasminit u32 build_func(u32 arg) { WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return arg & FUNC_MASK; } static inline __uasminit u32 build_set(u32 arg) { WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n"); return arg & SET_MASK; } /* * The order of opcode arguments is implicitly left to right, * starting with RS and ending with FUNC or IMM. */ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) 
{ struct insn *ip = NULL; unsigned int i; va_list ap; u32 op; for (i = 0; insn_table[i].opcode != insn_invalid; i++) if (insn_table[i].opcode == opc) { ip = &insn_table[i]; break; } if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) panic("Unsupported Micro-assembler instruction %d", opc); op = ip->match; va_start(ap, opc); if (ip->fields & RS) op |= build_rs(va_arg(ap, u32)); if (ip->fields & RT) op |= build_rt(va_arg(ap, u32)); if (ip->fields & RD) op |= build_rd(va_arg(ap, u32)); if (ip->fields & RE) op |= build_re(va_arg(ap, u32)); if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32)); if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32)); if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32)); if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32)); if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32)); if (ip->fields & SET) op |= build_set(va_arg(ap, u32)); if (ip->fields & SCIMM) op |= build_scimm(va_arg(ap, u32)); va_end(ap); **buf = op; (*buf)++; } #define I_u1u2u3(op) \ Ip_u1u2u3(op) \ { \ build_insn(buf, insn##op, a, b, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1u3(op) \ Ip_u2u1u3(op) \ { \ build_insn(buf, insn##op, b, a, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u3u1u2(op) \ Ip_u3u1u2(op) \ { \ build_insn(buf, insn##op, b, c, a); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1u2s3(op) \ Ip_u1u2s3(op) \ { \ build_insn(buf, insn##op, a, b, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2s3u1(op) \ Ip_u2s3u1(op) \ { \ build_insn(buf, insn##op, c, a, b); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1s3(op) \ Ip_u2u1s3(op) \ { \ build_insn(buf, insn##op, b, a, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1msbu3(op) \ Ip_u2u1msbu3(op) \ { \ build_insn(buf, insn##op, b, a, c+d-1, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u2u1msb32u3(op) \ Ip_u2u1msbu3(op) \ { \ build_insn(buf, insn##op, b, a, c+d-33, c); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1u2(op) \ Ip_u1u2(op) \ { \ 
build_insn(buf, insn##op, a, b); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1s2(op) \ Ip_u1s2(op) \ { \ build_insn(buf, insn##op, a, b); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_u1(op) \ Ip_u1(op) \ { \ build_insn(buf, insn##op, a); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); #define I_0(op) \ Ip_0(op) \ { \ build_insn(buf, insn##op); \ } \ UASM_EXPORT_SYMBOL(uasm_i##op); I_u2u1s3(_addiu) I_u3u1u2(_addu) I_u2u1u3(_andi) I_u3u1u2(_and) I_u1u2s3(_beq) I_u1u2s3(_beql) I_u1s2(_bgez) I_u1s2(_bgezl) I_u1s2(_bltz) I_u1s2(_bltzl) I_u1u2s3(_bne) I_u2s3u1(_cache) I_u1u2u3(_dmfc0) I_u1u2u3(_dmtc0) I_u2u1s3(_daddiu) I_u3u1u2(_daddu) I_u2u1u3(_dsll) I_u2u1u3(_dsll32) I_u2u1u3(_dsra) I_u2u1u3(_dsrl) I_u2u1u3(_dsrl32) I_u2u1u3(_drotr) I_u2u1u3(_drotr32) I_u3u1u2(_dsubu) I_0(_eret) I_u1(_j) I_u1(_jal) I_u1(_jr) I_u2s3u1(_ld) I_u2s3u1(_ll) I_u2s3u1(_lld) I_u1s2(_lui) I_u2s3u1(_lw) I_u1u2u3(_mfc0) I_u1u2u3(_mtc0) I_u2u1u3(_ori) I_u3u1u2(_or) I_0(_rfe) I_u2s3u1(_sc) I_u2s3u1(_scd) I_u2s3u1(_sd) I_u2u1u3(_sll) I_u2u1u3(_sra) I_u2u1u3(_srl) I_u2u1u3(_rotr) I_u3u1u2(_subu) I_u2s3u1(_sw) I_0(_tlbp) I_0(_tlbr) I_0(_tlbwi) I_0(_tlbwr) I_u3u1u2(_xor) I_u2u1u3(_xori) I_u2u1msbu3(_dins); I_u2u1msb32u3(_dinsm); I_u1(_syscall); I_u1u2s3(_bbit0); I_u1u2s3(_bbit1); I_u3u1u2(_lwx) I_u3u1u2(_ldx) #ifdef CONFIG_CPU_CAVIUM_OCTEON #include <asm/octeon/octeon.h> void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b, unsigned int c) { if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) /* * As per erratum Core-14449, replace prefetches 0-4, * 6-24 with 'pref 28'. */ build_insn(buf, insn_pref, c, 28, b); else build_insn(buf, insn_pref, c, a, b); } UASM_EXPORT_SYMBOL(uasm_i_pref); #else I_u2s3u1(_pref) #endif /* Handle labels. 
*/ void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) { (*lab)->addr = addr; (*lab)->lab = lid; (*lab)++; } UASM_EXPORT_SYMBOL(uasm_build_label); int __uasminit uasm_in_compat_space_p(long addr) { /* Is this address in 32bit compat space? */ #ifdef CONFIG_64BIT return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); #else return 1; #endif } UASM_EXPORT_SYMBOL(uasm_in_compat_space_p); static int __uasminit uasm_rel_highest(long val) { #ifdef CONFIG_64BIT return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; #else return 0; #endif } static int __uasminit uasm_rel_higher(long val) { #ifdef CONFIG_64BIT return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; #else return 0; #endif } int __uasminit uasm_rel_hi(long val) { return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; } UASM_EXPORT_SYMBOL(uasm_rel_hi); int __uasminit uasm_rel_lo(long val) { return ((val & 0xffff) ^ 0x8000) - 0x8000; } UASM_EXPORT_SYMBOL(uasm_rel_lo); void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr) { if (!uasm_in_compat_space_p(addr)) { uasm_i_lui(buf, rs, uasm_rel_highest(addr)); if (uasm_rel_higher(addr)) uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr)); if (uasm_rel_hi(addr)) { uasm_i_dsll(buf, rs, rs, 16); uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr)); uasm_i_dsll(buf, rs, rs, 16); } else uasm_i_dsll32(buf, rs, rs, 0); } else uasm_i_lui(buf, rs, uasm_rel_hi(addr)); } UASM_EXPORT_SYMBOL(UASM_i_LA_mostly); void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr) { UASM_i_LA_mostly(buf, rs, addr); if (uasm_rel_lo(addr)) { if (!uasm_in_compat_space_p(addr)) uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr)); else uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr)); } } UASM_EXPORT_SYMBOL(UASM_i_LA); /* Handle relocations. 
*/ void __uasminit uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid) { (*rel)->addr = addr; (*rel)->type = R_MIPS_PC16; (*rel)->lab = lid; (*rel)++; } UASM_EXPORT_SYMBOL(uasm_r_mips_pc16); static inline void __uasminit __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) { long laddr = (long)lab->addr; long raddr = (long)rel->addr; switch (rel->type) { case R_MIPS_PC16: *rel->addr |= build_bimm(laddr - (raddr + 4)); break; default: panic("Unsupported Micro-assembler relocation %d", rel->type); } } void __uasminit uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) { struct uasm_label *l; for (; rel->lab != UASM_LABEL_INVALID; rel++) for (l = lab; l->lab != UASM_LABEL_INVALID; l++) if (rel->lab == l->lab) __resolve_relocs(rel, l); } UASM_EXPORT_SYMBOL(uasm_resolve_relocs); void __uasminit uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off) { for (; rel->lab != UASM_LABEL_INVALID; rel++) if (rel->addr >= first && rel->addr < end) rel->addr += off; } UASM_EXPORT_SYMBOL(uasm_move_relocs); void __uasminit uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off) { for (; lab->lab != UASM_LABEL_INVALID; lab++) if (lab->addr >= first && lab->addr < end) lab->addr += off; } UASM_EXPORT_SYMBOL(uasm_move_labels); void __uasminit uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, u32 *end, u32 *target) { long off = (long)(target - first); memcpy(target, first, (end - first) * sizeof(u32)); uasm_move_relocs(rel, first, end, off); uasm_move_labels(lab, first, end, off); } UASM_EXPORT_SYMBOL(uasm_copy_handler); int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) { for (; rel->lab != UASM_LABEL_INVALID; rel++) { if (rel->addr == addr && (rel->type == R_MIPS_PC16 || rel->type == R_MIPS_26)) return 1; } return 0; } UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay); /* Convenience functions for labeled branches. 
*/ void __uasminit uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bltz(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_bltz); void __uasminit uasm_il_b(u32 **p, struct uasm_reloc **r, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_b(p, 0); } UASM_EXPORT_SYMBOL(uasm_il_b); void __uasminit uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_beqz(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_beqz); void __uasminit uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_beqzl(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_beqzl); void __uasminit uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, unsigned int reg2, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bne(p, reg1, reg2, 0); } UASM_EXPORT_SYMBOL(uasm_il_bne); void __uasminit uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bnez(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_bnez); void __uasminit uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bgezl(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_bgezl); void __uasminit uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bgez(p, reg, 0); } UASM_EXPORT_SYMBOL(uasm_il_bgez); void __uasminit uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bbit0(p, reg, bit, 0); } UASM_EXPORT_SYMBOL(uasm_il_bbit0); void __uasminit uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); uasm_i_bbit1(p, reg, bit, 0); } UASM_EXPORT_SYMBOL(uasm_il_bbit1);
gpl-2.0
kaldaris/WIDzard-A850K
security/tomoyo/realpath.c
7809
8031
/*
 * security/tomoyo/realpath.c
 *
 * Copyright (C) 2005-2011  NTT DATA CORPORATION
 */

#include "common.h"
#include <linux/magic.h>

/**
 * tomoyo_encode2 - Encode binary string to ascii string.
 *
 * @str:     String in binary format.
 * @str_len: Size of @str in byte.
 *
 * Returns pointer to @str in ascii format on success, NULL otherwise.
 *
 * This function uses kzalloc(), so caller must kfree() if this function
 * didn't return NULL.
 */
char *tomoyo_encode2(const char *str, int str_len)
{
	int i;
	int needed = 0;
	const char *p = str;
	char *dst;
	char *result;

	if (!p)
		return NULL;
	/* First pass: work out how long the escaped form will be. */
	for (i = 0; i < str_len; i++) {
		const unsigned char c = p[i];

		if (c == '\\')
			needed += 2;		/* emitted as "\\" */
		else if (c > ' ' && c < 127)
			needed++;		/* printable, copied as-is */
		else
			needed += 4;		/* emitted as "\ooo" octal escape */
	}
	needed++; /* Reserve space for appending "/". */
	dst = kzalloc(needed + 10, GFP_NOFS);
	if (!dst)
		return NULL;
	result = dst;
	/* Second pass: emit the escaped string (kzalloc() NUL-terminates). */
	p = str;
	for (i = 0; i < str_len; i++) {
		const unsigned char c = p[i];

		if (c == '\\') {
			*dst++ = '\\';
			*dst++ = '\\';
		} else if (c > ' ' && c < 127) {
			*dst++ = c;
		} else {
			*dst++ = '\\';
			*dst++ = (c >> 6) + '0';
			*dst++ = ((c >> 3) & 7) + '0';
			*dst++ = (c & 7) + '0';
		}
	}
	return result;
}

/**
 * tomoyo_encode - Encode binary string to ascii string.
 *
 * @str: String in binary format.
 *
 * Returns pointer to @str in ascii format on success, NULL otherwise.
 *
 * This function uses kzalloc(), so caller must kfree() if this function
 * didn't return NULL.
 */
char *tomoyo_encode(const char *str)
{
	if (!str)
		return NULL;
	return tomoyo_encode2(str, strlen(str));
}

/**
 * tomoyo_get_absolute_path - Get the path of a dentry but ignores chroot'ed root.
 *
 * @path:   Pointer to "struct path".
 * @buffer: Pointer to buffer to return value in.
 * @buflen: Sizeof @buffer.
 *
 * Returns the buffer on success, an error code otherwise.
 *
 * If dentry is a directory, trailing '/' is appended.
*/ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer, const int buflen) { char *pos = ERR_PTR(-ENOMEM); if (buflen >= 256) { /* go to whatever namespace root we are under */ pos = d_absolute_path(path, buffer, buflen - 1); if (!IS_ERR(pos) && *pos == '/' && pos[1]) { struct inode *inode = path->dentry->d_inode; if (inode && S_ISDIR(inode->i_mode)) { buffer[buflen - 2] = '/'; buffer[buflen - 1] = '\0'; } } } return pos; } /** * tomoyo_get_dentry_path - Get the path of a dentry. * * @dentry: Pointer to "struct dentry". * @buffer: Pointer to buffer to return value in. * @buflen: Sizeof @buffer. * * Returns the buffer on success, an error code otherwise. * * If dentry is a directory, trailing '/' is appended. */ static char *tomoyo_get_dentry_path(struct dentry *dentry, char * const buffer, const int buflen) { char *pos = ERR_PTR(-ENOMEM); if (buflen >= 256) { pos = dentry_path_raw(dentry, buffer, buflen - 1); if (!IS_ERR(pos) && *pos == '/' && pos[1]) { struct inode *inode = dentry->d_inode; if (inode && S_ISDIR(inode->i_mode)) { buffer[buflen - 2] = '/'; buffer[buflen - 1] = '\0'; } } } return pos; } /** * tomoyo_get_local_path - Get the path of a dentry. * * @dentry: Pointer to "struct dentry". * @buffer: Pointer to buffer to return value in. * @buflen: Sizeof @buffer. * * Returns the buffer on success, an error code otherwise. */ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer, const int buflen) { struct super_block *sb = dentry->d_sb; char *pos = tomoyo_get_dentry_path(dentry, buffer, buflen); if (IS_ERR(pos)) return pos; /* Convert from $PID to self if $PID is current thread. 
*/ if (sb->s_magic == PROC_SUPER_MAGIC && *pos == '/') { char *ep; const pid_t pid = (pid_t) simple_strtoul(pos + 1, &ep, 10); if (*ep == '/' && pid && pid == task_tgid_nr_ns(current, sb->s_fs_info)) { pos = ep - 5; if (pos < buffer) goto out; memmove(pos, "/self", 5); } goto prepend_filesystem_name; } /* Use filesystem name for unnamed devices. */ if (!MAJOR(sb->s_dev)) goto prepend_filesystem_name; { struct inode *inode = sb->s_root->d_inode; /* * Use filesystem name if filesystem does not support rename() * operation. */ if (inode->i_op && !inode->i_op->rename) goto prepend_filesystem_name; } /* Prepend device name. */ { char name[64]; int name_len; const dev_t dev = sb->s_dev; name[sizeof(name) - 1] = '\0'; snprintf(name, sizeof(name) - 1, "dev(%u,%u):", MAJOR(dev), MINOR(dev)); name_len = strlen(name); pos -= name_len; if (pos < buffer) goto out; memmove(pos, name, name_len); return pos; } /* Prepend filesystem name. */ prepend_filesystem_name: { const char *name = sb->s_type->name; const int name_len = strlen(name); pos -= name_len + 1; if (pos < buffer) goto out; memmove(pos, name, name_len); pos[name_len] = ':'; } return pos; out: return ERR_PTR(-ENOMEM); } /** * tomoyo_get_socket_name - Get the name of a socket. * * @path: Pointer to "struct path". * @buffer: Pointer to buffer to return value in. * @buflen: Sizeof @buffer. * * Returns the buffer. */ static char *tomoyo_get_socket_name(struct path *path, char * const buffer, const int buflen) { struct inode *inode = path->dentry->d_inode; struct socket *sock = inode ? SOCKET_I(inode) : NULL; struct sock *sk = sock ? sock->sk : NULL; if (sk) { snprintf(buffer, buflen, "socket:[family=%u:type=%u:" "protocol=%u]", sk->sk_family, sk->sk_type, sk->sk_protocol); } else { snprintf(buffer, buflen, "socket:[unknown]"); } return buffer; } /** * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root. * * @path: Pointer to "struct path". 
* * Returns the realpath of the given @path on success, NULL otherwise. * * If dentry is a directory, trailing '/' is appended. * Characters out of 0x20 < c < 0x7F range are converted to * \ooo style octal string. * Character \ is converted to \\ string. * * These functions use kzalloc(), so the caller must call kfree() * if these functions didn't return NULL. */ char *tomoyo_realpath_from_path(struct path *path) { char *buf = NULL; char *name = NULL; unsigned int buf_len = PAGE_SIZE / 2; struct dentry *dentry = path->dentry; struct super_block *sb; if (!dentry) return NULL; sb = dentry->d_sb; while (1) { char *pos; struct inode *inode; buf_len <<= 1; kfree(buf); buf = kmalloc(buf_len, GFP_NOFS); if (!buf) break; /* To make sure that pos is '\0' terminated. */ buf[buf_len - 1] = '\0'; /* Get better name for socket. */ if (sb->s_magic == SOCKFS_MAGIC) { pos = tomoyo_get_socket_name(path, buf, buf_len - 1); goto encode; } /* For "pipe:[\$]". */ if (dentry->d_op && dentry->d_op->d_dname) { pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1); goto encode; } inode = sb->s_root->d_inode; /* * Get local name for filesystems without rename() operation * or dentry without vfsmount. */ if (!path->mnt || (inode->i_op && !inode->i_op->rename)) pos = tomoyo_get_local_path(path->dentry, buf, buf_len - 1); /* Get absolute name for the rest. */ else { pos = tomoyo_get_absolute_path(path, buf, buf_len - 1); /* * Fall back to local name if absolute name is not * available. */ if (pos == ERR_PTR(-EINVAL)) pos = tomoyo_get_local_path(path->dentry, buf, buf_len - 1); } encode: if (IS_ERR(pos)) continue; name = tomoyo_encode(pos); break; } kfree(buf); if (!name) tomoyo_warn_oom(__func__); return name; } /** * tomoyo_realpath_nofollow - Get realpath of a pathname. * * @pathname: The pathname to solve. * * Returns the realpath of @pathname on success, NULL otherwise. 
*/ char *tomoyo_realpath_nofollow(const char *pathname) { struct path path; if (pathname && kern_path(pathname, 0, &path) == 0) { char *buf = tomoyo_realpath_from_path(&path); path_put(&path); return buf; } return NULL; }
gpl-2.0
forfivo/v500_kernel_aosp
drivers/leds/ledtrig-heartbeat.c
8833
3205
/* * LED Heartbeat Trigger * * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp> * * Based on Richard Purdie's ledtrig-timer.c and some arch's * CONFIG_HEARTBEAT code. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/sched.h> #include <linux/leds.h> #include "leds.h" struct heartbeat_trig_data { unsigned int phase; unsigned int period; struct timer_list timer; }; static void led_heartbeat_function(unsigned long data) { struct led_classdev *led_cdev = (struct led_classdev *) data; struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data; unsigned long brightness = LED_OFF; unsigned long delay = 0; /* acts like an actual heart beat -- ie thump-thump-pause... */ switch (heartbeat_data->phase) { case 0: /* * The hyperbolic function below modifies the * heartbeat period length in dependency of the * current (1min) load. It goes through the points * f(0)=1260, f(1)=860, f(5)=510, f(inf)->300. 
*/ heartbeat_data->period = 300 + (6720 << FSHIFT) / (5 * avenrun[0] + (7 << FSHIFT)); heartbeat_data->period = msecs_to_jiffies(heartbeat_data->period); delay = msecs_to_jiffies(70); heartbeat_data->phase++; brightness = led_cdev->max_brightness; break; case 1: delay = heartbeat_data->period / 4 - msecs_to_jiffies(70); heartbeat_data->phase++; break; case 2: delay = msecs_to_jiffies(70); heartbeat_data->phase++; brightness = led_cdev->max_brightness; break; default: delay = heartbeat_data->period - heartbeat_data->period / 4 - msecs_to_jiffies(70); heartbeat_data->phase = 0; break; } led_set_brightness(led_cdev, brightness); mod_timer(&heartbeat_data->timer, jiffies + delay); } static void heartbeat_trig_activate(struct led_classdev *led_cdev) { struct heartbeat_trig_data *heartbeat_data; heartbeat_data = kzalloc(sizeof(*heartbeat_data), GFP_KERNEL); if (!heartbeat_data) return; led_cdev->trigger_data = heartbeat_data; setup_timer(&heartbeat_data->timer, led_heartbeat_function, (unsigned long) led_cdev); heartbeat_data->phase = 0; led_heartbeat_function(heartbeat_data->timer.data); } static void heartbeat_trig_deactivate(struct led_classdev *led_cdev) { struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data; if (heartbeat_data) { del_timer_sync(&heartbeat_data->timer); kfree(heartbeat_data); } } static struct led_trigger heartbeat_led_trigger = { .name = "heartbeat", .activate = heartbeat_trig_activate, .deactivate = heartbeat_trig_deactivate, }; static int __init heartbeat_trig_init(void) { return led_trigger_register(&heartbeat_led_trigger); } static void __exit heartbeat_trig_exit(void) { led_trigger_unregister(&heartbeat_led_trigger); } module_init(heartbeat_trig_init); module_exit(heartbeat_trig_exit); MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); MODULE_DESCRIPTION("Heartbeat LED trigger"); MODULE_LICENSE("GPL");
gpl-2.0
whyorean/android_kernel_xiaomi_msm8996
arch/x86/kernel/setup.c
130
32073
/* * Copyright (C) 1995 Linus Torvalds * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * * Memory region support * David Parsons <orc@pell.chi.il.us>, July-August 1999 * * Added E820 sanitization routine (removes overlapping memory regions); * Brian Moyle <bmoyle@mvista.com>, February 2001 * * Moved CPU detection code to cpu/${cpu}.c * Patrick Mochel <mochel@osdl.org>, March 2002 * * Provisions for empty E820 memory regions (reported by certain BIOSes). * Alex Achenbach <xela@slit.de>, December 2002. * */ /* * This file handles the architecture-dependent parts of initialization */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/screen_info.h> #include <linux/ioport.h> #include <linux/acpi.h> #include <linux/sfi.h> #include <linux/apm_bios.h> #include <linux/initrd.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/seq_file.h> #include <linux/console.h> #include <linux/root_dev.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/efi.h> #include <linux/init.h> #include <linux/edd.h> #include <linux/iscsi_ibft.h> #include <linux/nodemask.h> #include <linux/kexec.h> #include <linux/dmi.h> #include <linux/pfn.h> #include <linux/pci.h> #include <asm/pci-direct.h> #include <linux/init_ohci1394_dma.h> #include <linux/kvm_para.h> #include <linux/dma-contiguous.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/delay.h> #include <linux/kallsyms.h> #include <linux/cpufreq.h> #include <linux/dma-mapping.h> #include <linux/ctype.h> #include <linux/uaccess.h> #include <linux/percpu.h> #include <linux/crash_dump.h> #include <linux/tboot.h> #include <linux/jiffies.h> #include <video/edid.h> #include <asm/mtrr.h> #include <asm/apic.h> #include <asm/realmode.h> #include <asm/e820.h> #include <asm/mpspec.h> #include <asm/setup.h> #include <asm/efi.h> #include 
<asm/timer.h> #include <asm/i8259.h> #include <asm/sections.h> #include <asm/io_apic.h> #include <asm/ist.h> #include <asm/setup_arch.h> #include <asm/bios_ebda.h> #include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/bugs.h> #include <asm/kasan.h> #include <asm/vsyscall.h> #include <asm/cpu.h> #include <asm/desc.h> #include <asm/dma.h> #include <asm/iommu.h> #include <asm/gart.h> #include <asm/mmu_context.h> #include <asm/proto.h> #include <asm/paravirt.h> #include <asm/hypervisor.h> #include <asm/olpc_ofw.h> #include <asm/percpu.h> #include <asm/topology.h> #include <asm/apicdef.h> #include <asm/amd_nb.h> #include <asm/mce.h> #include <asm/alternative.h> #include <asm/prom.h> /* * max_low_pfn_mapped: highest direct mapped pfn under 4GB * max_pfn_mapped: highest direct mapped pfn over 4GB * * The direct mapping only covers E820_RAM regions, so the ranges and gaps are * represented by pfn_mapped */ unsigned long max_low_pfn_mapped; unsigned long max_pfn_mapped; #ifdef CONFIG_DMI RESERVE_BRK(dmi_alloc, 65536); #endif static __initdata unsigned long _brk_start = (unsigned long)__brk_base; unsigned long _brk_end = (unsigned long)__brk_base; #ifdef CONFIG_X86_64 int default_cpu_present_to_apicid(int mps_cpu) { return __default_cpu_present_to_apicid(mps_cpu); } int default_check_phys_apicid_present(int phys_apicid) { return __default_check_phys_apicid_present(phys_apicid); } #endif struct boot_params boot_params; /* * Machine setup.. 
*/ static struct resource data_resource = { .name = "Kernel data", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource code_resource = { .name = "Kernel code", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource bss_resource = { .name = "Kernel bss", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; #ifdef CONFIG_X86_32 /* cpu data as detected by the assembly code in head.S */ struct cpuinfo_x86 new_cpu_data = { .wp_works_ok = -1, }; /* common cpu data for all cpus */ struct cpuinfo_x86 boot_cpu_data __read_mostly = { .wp_works_ok = -1, }; EXPORT_SYMBOL(boot_cpu_data); unsigned int def_to_bigsmp; /* for MCA, but anyone else can use it if they want */ unsigned int machine_id; unsigned int machine_submodel_id; unsigned int BIOS_revision; struct apm_info apm_info; EXPORT_SYMBOL(apm_info); #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE) struct ist_info ist_info; EXPORT_SYMBOL(ist_info); #else struct ist_info ist_info; #endif #else struct cpuinfo_x86 boot_cpu_data __read_mostly = { .x86_phys_bits = MAX_PHYSMEM_BITS, }; EXPORT_SYMBOL(boot_cpu_data); #endif #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) __visible unsigned long mmu_cr4_features; #else __visible unsigned long mmu_cr4_features = X86_CR4_PAE; #endif /* Boot loader ID and version as integers, for the benefit of proc_dointvec */ int bootloader_type, bootloader_version; /* * Setup options */ struct screen_info screen_info; EXPORT_SYMBOL(screen_info); struct edid_info edid_info; EXPORT_SYMBOL_GPL(edid_info); extern int root_mountflags; unsigned long saved_video_mode; #define RAMDISK_IMAGE_START_MASK 0x07FF #define RAMDISK_PROMPT_FLAG 0x8000 #define RAMDISK_LOAD_FLAG 0x4000 static char __initdata command_line[COMMAND_LINE_SIZE]; #ifdef CONFIG_CMDLINE_BOOL static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; #endif #if defined(CONFIG_EDD) || 
defined(CONFIG_EDD_MODULE) struct edd edd; #ifdef CONFIG_EDD_MODULE EXPORT_SYMBOL(edd); #endif /** * copy_edd() - Copy the BIOS EDD information * from boot_params into a safe place. * */ static inline void __init copy_edd(void) { memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer, sizeof(edd.mbr_signature)); memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info)); edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries; edd.edd_info_nr = boot_params.eddbuf_entries; } #else static inline void __init copy_edd(void) { } #endif void * __init extend_brk(size_t size, size_t align) { size_t mask = align - 1; void *ret; BUG_ON(_brk_start == 0); BUG_ON(align & mask); _brk_end = (_brk_end + mask) & ~mask; BUG_ON((char *)(_brk_end + size) > __brk_limit); ret = (void *)_brk_end; _brk_end += size; memset(ret, 0, size); return ret; } #ifdef CONFIG_X86_32 static void __init cleanup_highmap(void) { } #endif static void __init reserve_brk(void) { if (_brk_end > _brk_start) memblock_reserve(__pa_symbol(_brk_start), _brk_end - _brk_start); /* Mark brk area as locked down and no longer taking any new allocations */ _brk_start = 0; } u64 relocated_ramdisk; #ifdef CONFIG_BLK_DEV_INITRD static u64 __init get_ramdisk_image(void) { u64 ramdisk_image = boot_params.hdr.ramdisk_image; ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32; return ramdisk_image; } static u64 __init get_ramdisk_size(void) { u64 ramdisk_size = boot_params.hdr.ramdisk_size; ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32; return ramdisk_size; } #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) static void __init relocate_initrd(void) { /* Assume only end is not page aligned */ u64 ramdisk_image = get_ramdisk_image(); u64 ramdisk_size = get_ramdisk_size(); u64 area_size = PAGE_ALIGN(ramdisk_size); unsigned long slop, clen, mapaddr; char *p, *q; /* We need to move the initrd down into directly mapped mem */ relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped), 
area_size, PAGE_SIZE); if (!relocated_ramdisk) panic("Cannot find place for new RAMDISK of size %lld\n", ramdisk_size); /* Note: this includes all the mem currently occupied by the initrd, we rely on that fact to keep the data intact. */ memblock_reserve(relocated_ramdisk, area_size); initrd_start = relocated_ramdisk + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); q = (char *)initrd_start; /* Copy the initrd */ while (ramdisk_size) { slop = ramdisk_image & ~PAGE_MASK; clen = ramdisk_size; if (clen > MAX_MAP_CHUNK-slop) clen = MAX_MAP_CHUNK-slop; mapaddr = ramdisk_image & PAGE_MASK; p = early_memremap(mapaddr, clen+slop); memcpy(q, p+slop, clen); early_iounmap(p, clen+slop); q += clen; ramdisk_image += clen; ramdisk_size -= clen; } ramdisk_image = get_ramdisk_image(); ramdisk_size = get_ramdisk_size(); printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" " [mem %#010llx-%#010llx]\n", ramdisk_image, ramdisk_image + ramdisk_size - 1, relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); } static void __init early_reserve_initrd(void) { /* Assume only end is not page aligned */ u64 ramdisk_image = get_ramdisk_image(); u64 ramdisk_size = get_ramdisk_size(); u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); if (!boot_params.hdr.type_of_loader || !ramdisk_image || !ramdisk_size) return; /* No initrd provided by bootloader */ memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image); } static void __init reserve_initrd(void) { /* Assume only end is not page aligned */ u64 ramdisk_image = get_ramdisk_image(); u64 ramdisk_size = get_ramdisk_size(); u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); u64 mapped_size; if (!boot_params.hdr.type_of_loader || !ramdisk_image || !ramdisk_size) return; /* No initrd provided by bootloader */ initrd_start = 0; mapped_size = memblock_mem_size(max_pfn_mapped); if 
(ramdisk_size >= (mapped_size>>1)) panic("initrd too large to handle, " "disabling initrd (%lld needed, %lld available)\n", ramdisk_size, mapped_size>>1); printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image, ramdisk_end - 1); if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image), PFN_DOWN(ramdisk_end))) { /* All are mapped, easy case */ initrd_start = ramdisk_image + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; return; } relocate_initrd(); memblock_free(ramdisk_image, ramdisk_end - ramdisk_image); } #else static void __init early_reserve_initrd(void) { } static void __init reserve_initrd(void) { } #endif /* CONFIG_BLK_DEV_INITRD */ static void __init parse_setup_data(void) { struct setup_data *data; u64 pa_data, pa_next; pa_data = boot_params.hdr.setup_data; while (pa_data) { u32 data_len, map_len, data_type; map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK), (u64)sizeof(struct setup_data)); data = early_memremap(pa_data, map_len); data_len = data->len + sizeof(struct setup_data); data_type = data->type; pa_next = data->next; early_iounmap(data, map_len); switch (data_type) { case SETUP_E820_EXT: parse_e820_ext(pa_data, data_len); break; case SETUP_DTB: add_dtb(pa_data); break; case SETUP_EFI: parse_efi_setup(pa_data, data_len); break; default: break; } pa_data = pa_next; } } static void __init e820_reserve_setup_data(void) { struct setup_data *data; u64 pa_data; int found = 0; pa_data = boot_params.hdr.setup_data; while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); e820_update_range(pa_data, sizeof(*data)+data->len, E820_RAM, E820_RESERVED_KERN); found = 1; pa_data = data->next; early_iounmap(data, sizeof(*data)); } if (!found) return; sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); memcpy(&e820_saved, &e820, sizeof(struct e820map)); printk(KERN_INFO "extended physical RAM map:\n"); e820_print_map("reserve setup_data"); } static void __init memblock_x86_reserve_range_setup_data(void) { struct setup_data *data; u64 
pa_data; pa_data = boot_params.hdr.setup_data; while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); memblock_reserve(pa_data, sizeof(*data) + data->len); pa_data = data->next; early_iounmap(data, sizeof(*data)); } } /* * --------- Crashkernel reservation ------------------------------ */ #ifdef CONFIG_KEXEC /* * Keep the crash kernel below this limit. On 32 bits earlier kernels * would limit the kernel to the low 512 MiB due to mapping restrictions. * On 64bit, old kexec-tools need to under 896MiB. */ #ifdef CONFIG_X86_32 # define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20) # define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20) #else # define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20) # define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM #endif static void __init reserve_crashkernel_low(void) { #ifdef CONFIG_X86_64 const unsigned long long alignment = 16<<20; /* 16M */ unsigned long long low_base = 0, low_size = 0; unsigned long total_low_mem; unsigned long long base; bool auto_set = false; int ret; total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); /* crashkernel=Y,low */ ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base); if (ret != 0) { /* * two parts from lib/swiotlb.c: * swiotlb size: user specified with swiotlb= or default. * swiotlb overflow buffer: now is hardcoded to 32k. * We round it to 8M for other buffers that * may need to stay low too. */ low_size = swiotlb_size_or_default() + (8UL<<20); auto_set = true; } else { /* passed with crashkernel=0,low ? 
*/ if (!low_size) return; } low_base = memblock_find_in_range(low_size, (1ULL<<32), low_size, alignment); if (!low_base) { if (!auto_set) pr_info("crashkernel low reservation failed - No suitable area found.\n"); return; } memblock_reserve(low_base, low_size); pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n", (unsigned long)(low_size >> 20), (unsigned long)(low_base >> 20), (unsigned long)(total_low_mem >> 20)); crashk_low_res.start = low_base; crashk_low_res.end = low_base + low_size - 1; insert_resource(&iomem_resource, &crashk_low_res); #endif } static void __init reserve_crashkernel(void) { const unsigned long long alignment = 16<<20; /* 16M */ unsigned long long total_mem; unsigned long long crash_size, crash_base; bool high = false; int ret; total_mem = memblock_phys_mem_size(); /* crashkernel=XM */ ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); if (ret != 0 || crash_size <= 0) { /* crashkernel=X,high */ ret = parse_crashkernel_high(boot_command_line, total_mem, &crash_size, &crash_base); if (ret != 0 || crash_size <= 0) return; high = true; } /* 0 means: find the address automatically */ if (crash_base <= 0) { /* * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX */ crash_base = memblock_find_in_range(alignment, high ? 
CRASH_KERNEL_ADDR_HIGH_MAX : CRASH_KERNEL_ADDR_LOW_MAX, crash_size, alignment); if (!crash_base) { pr_info("crashkernel reservation failed - No suitable area found.\n"); return; } } else { unsigned long long start; start = memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1<<20); if (start != crash_base) { pr_info("crashkernel reservation failed - memory is in use.\n"); return; } } memblock_reserve(crash_base, crash_size); printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crash_base >> 20), (unsigned long)(total_mem >> 20)); crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; insert_resource(&iomem_resource, &crashk_res); if (crash_base >= (1ULL<<32)) reserve_crashkernel_low(); } #else static void __init reserve_crashkernel(void) { } #endif static struct resource standard_io_resources[] = { { .name = "dma1", .start = 0x00, .end = 0x1f, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "timer0", .start = 0x40, .end = 0x43, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "timer1", .start = 0x50, .end = 0x53, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "keyboard", .start = 0x60, .end = 0x60, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "keyboard", .start = 0x64, .end = 0x64, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "dma page reg", .start = 0x80, .end = 0x8f, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "dma2", .start = 0xc0, .end = 0xdf, .flags = IORESOURCE_BUSY | IORESOURCE_IO }, { .name = "fpu", .start = 0xf0, .end = 0xff, .flags = IORESOURCE_BUSY | IORESOURCE_IO } }; void __init reserve_standard_io_resources(void) { int i; /* request I/O space for devices used on all i[345]86 PCs */ for (i = 0; i 
< ARRAY_SIZE(standard_io_resources); i++) request_resource(&ioport_resource, &standard_io_resources[i]); } static __init void reserve_ibft_region(void) { unsigned long addr, size = 0; addr = find_ibft_region(&size); if (size) memblock_reserve(addr, size); } static bool __init snb_gfx_workaround_needed(void) { #ifdef CONFIG_PCI int i; u16 vendor, devid; static const __initconst u16 snb_ids[] = { 0x0102, 0x0112, 0x0122, 0x0106, 0x0116, 0x0126, 0x010a, }; /* Assume no if something weird is going on with PCI */ if (!early_pci_allowed()) return false; vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID); if (vendor != 0x8086) return false; devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID); for (i = 0; i < ARRAY_SIZE(snb_ids); i++) if (devid == snb_ids[i]) return true; #endif return false; } /* * Sandy Bridge graphics has trouble with certain ranges, exclude * them from allocation. */ static void __init trim_snb_memory(void) { static const __initconst unsigned long bad_pages[] = { 0x20050000, 0x20110000, 0x20130000, 0x20138000, 0x40004000, }; int i; if (!snb_gfx_workaround_needed()) return; printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n"); /* * Reserve all memory below the 1 MB mark that has not * already been reserved. */ memblock_reserve(0, 1<<20); for (i = 0; i < ARRAY_SIZE(bad_pages); i++) { if (memblock_reserve(bad_pages[i], PAGE_SIZE)) printk(KERN_WARNING "failed to reserve 0x%08lx\n", bad_pages[i]); } } /* * Here we put platform-specific memory range workarounds, i.e. * memory known to be corrupt or otherwise in need to be reserved on * specific platforms. * * If this gets used more widely it could use a real dispatch mechanism. */ static void __init trim_platform_memory_ranges(void) { trim_snb_memory(); } static void __init trim_bios_range(void) { /* * A special case is the first 4Kb of memory; * This is a BIOS owned area, not kernel ram, but generally * not listed as such in the E820 table. 
* * This typically reserves additional memory (64KiB by default) * since some BIOSes are known to corrupt low memory. See the * Kconfig help text for X86_RESERVE_LOW. */ e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED); /* * special case: Some BIOSen report the PC BIOS * area (640->1Mb) as ram even though it is not. * take them out. */ e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } /* called before trim_bios_range() to spare extra sanitize */ static void __init e820_add_kernel_range(void) { u64 start = __pa_symbol(_text); u64 size = __pa_symbol(_end) - start; /* * Complain if .text .data and .bss are not marked as E820_RAM and * attempt to fix it by adding the range. We may have a confused BIOS, * or the user may have used memmap=exactmap or memmap=xxM$yyM to * exclude kernel range. If we really are running on top non-RAM, * we will crash later anyways. */ if (e820_all_mapped(start, start + size, E820_RAM)) return; pr_warn(".text .data .bss are not marked as E820_RAM!\n"); e820_remove_range(start, size, E820_RAM, 0); e820_add_region(start, size, E820_RAM); } static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10; static int __init parse_reservelow(char *p) { unsigned long long size; if (!p) return -EINVAL; size = memparse(p, &p); if (size < 4096) size = 4096; if (size > 640*1024) size = 640*1024; reserve_low = size; return 0; } early_param("reservelow", parse_reservelow); static void __init trim_low_memory_range(void) { memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE)); } /* * Dump out kernel offset information on panic. */ static int dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) { pr_emerg("Kernel Offset: 0x%lx from 0x%lx " "(relocation range: 0x%lx-0x%lx)\n", (unsigned long)&_text - __START_KERNEL, __START_KERNEL, __START_KERNEL_map, MODULES_VADDR-1); return 0; } /* * Determine if we were loaded by an EFI loader. 
If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures * for initialization. Note, the efi init code path is determined by the * global efi_enabled. This allows the same kernel image to be used on existing * systems (with a traditional BIOS) as well as on EFI systems. */ /* * setup_arch - architecture-specific boot-time initializations * * Note: On x86_64, fixmaps are ready for use even before this is called. */ void __init setup_arch(char **cmdline_p) { memblock_reserve(__pa_symbol(_text), (unsigned long)__bss_stop - (unsigned long)_text); early_reserve_initrd(); /* * At this point everything still needed from the boot loader * or BIOS or kernel text should be early reserved or marked not * RAM in e820. All other memory is free game. */ #ifdef CONFIG_X86_32 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); /* * copy kernel address range established so far and switch * to the proper swapper page table */ clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY, initial_page_table + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS); load_cr3(swapper_pg_dir); /* * Note: Quark X1000 CPUs advertise PGE incorrectly and require * a cr3 based tlb flush, so the following __flush_tlb_all() * will not flush anything because the cpu quirk which clears * X86_FEATURE_PGE has not been invoked yet. Though due to the * load_cr3() above the TLB has been flushed already. The * quirk is invoked before subsequent calls to __flush_tlb_all() * so proper operation is guaranteed. */ __flush_tlb_all(); #else printk(KERN_INFO "Command line: %s\n", boot_command_line); #endif /* * If we have OLPC OFW, we might end up relocating the fixmap due to * reserve_top(), so do this before touching the ioremap area. 
*/ olpc_ofw_detect(); early_trap_init(); early_cpu_init(); early_ioremap_init(); setup_olpc_ofw_pgd(); ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev); screen_info = boot_params.screen_info; edid_info = boot_params.edid_info; #ifdef CONFIG_X86_32 apm_info.bios = boot_params.apm_bios_info; ist_info = boot_params.ist_info; if (boot_params.sys_desc_table.length != 0) { machine_id = boot_params.sys_desc_table.table[0]; machine_submodel_id = boot_params.sys_desc_table.table[1]; BIOS_revision = boot_params.sys_desc_table.table[2]; } #endif saved_video_mode = boot_params.hdr.vid_mode; bootloader_type = boot_params.hdr.type_of_loader; if ((bootloader_type >> 4) == 0xe) { bootloader_type &= 0xf; bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4; } bootloader_version = bootloader_type & 0xf; bootloader_version |= boot_params.hdr.ext_loader_ver << 4; #ifdef CONFIG_BLK_DEV_RAM rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK; rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0); rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0); #endif #ifdef CONFIG_EFI if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, EFI32_LOADER_SIGNATURE, 4)) { set_bit(EFI_BOOT, &efi.flags); } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, EFI64_LOADER_SIGNATURE, 4)) { set_bit(EFI_BOOT, &efi.flags); set_bit(EFI_64BIT, &efi.flags); } if (efi_enabled(EFI_BOOT)) efi_memblock_x86_reserve_range(); #endif x86_init.oem.arch_setup(); iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; setup_memory_map(); parse_setup_data(); copy_edd(); if (!boot_params.hdr.root_flags) root_mountflags &= ~MS_RDONLY; init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = _brk_end; code_resource.start = __pa_symbol(_text); code_resource.end = __pa_symbol(_etext)-1; data_resource.start = __pa_symbol(_etext); data_resource.end = 
__pa_symbol(_edata)-1; bss_resource.start = __pa_symbol(__bss_start); bss_resource.end = __pa_symbol(__bss_stop)-1; #ifdef CONFIG_CMDLINE_BOOL #ifdef CONFIG_CMDLINE_OVERRIDE strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); #else if (builtin_cmdline[0]) { /* append boot loader cmdline to builtin */ strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE); strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE); strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); } #endif #endif strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; /* * x86_configure_nx() is called before parse_early_param() to detect * whether hardware doesn't support NX (so that the early EHCI debug * console setup can safely call set_fixmap()). It may then be called * again from within noexec_setup() during parsing early parameters * to honor the respective command line option. */ x86_configure_nx(); parse_early_param(); x86_report_nx(); /* after early param, so could get panic from serial */ memblock_x86_reserve_range_setup_data(); if (acpi_mps_check()) { #ifdef CONFIG_X86_LOCAL_APIC disable_apic = 1; #endif setup_clear_cpu_cap(X86_FEATURE_APIC); } #ifdef CONFIG_PCI if (pci_early_dump_regs) early_dump_pci_devices(); #endif /* update the e820_saved too */ e820_reserve_setup_data(); finish_e820_parsing(); if (efi_enabled(EFI_BOOT)) efi_init(); dmi_scan_machine(); dmi_memdev_walk(); dmi_set_dump_stack_arch_desc(); /* * VMware detection requires dmi to be available, so this * needs to be done after dmi_scan_machine, for the BP. 
*/ init_hypervisor_platform(); x86_init.resources.probe_roms(); /* after parse_early_param, so could debug it */ insert_resource(&iomem_resource, &code_resource); insert_resource(&iomem_resource, &data_resource); insert_resource(&iomem_resource, &bss_resource); e820_add_kernel_range(); trim_bios_range(); #ifdef CONFIG_X86_32 if (ppro_with_ram_bug()) { e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM, E820_RESERVED); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); printk(KERN_INFO "fixed physical RAM map:\n"); e820_print_map("bad_ppro"); } #else early_gart_iommu_check(); #endif /* * partially used pages are not usable - thus * we are rounding upwards: */ max_pfn = e820_end_of_ram_pfn(); /* update e820 for memory not covered by WB MTRRs */ mtrr_bp_init(); if (mtrr_trim_uncached_memory(max_pfn)) max_pfn = e820_end_of_ram_pfn(); #ifdef CONFIG_X86_32 /* max_low_pfn get updated here */ find_low_pfn_range(); #else check_x2apic(); /* How many end-of-memory variables you have, grandma! */ /* need this before calling reserve_initrd */ if (max_pfn > (1UL<<(32 - PAGE_SHIFT))) max_low_pfn = e820_end_of_low_ram_pfn(); else max_low_pfn = max_pfn; high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; #endif /* * Find and reserve possible boot-time SMP configuration: */ find_smp_config(); reserve_ibft_region(); early_alloc_pgt_buf(); /* * Need to conclude brk, before memblock_x86_fill() * it could use memblock_find_in_range, could overlap with * brk area. */ reserve_brk(); cleanup_highmap(); memblock_set_current_limit(ISA_END_ADDRESS); memblock_x86_fill(); /* * The EFI specification says that boot service code won't be called * after ExitBootServices(). This is, in fact, a lie. 
*/ if (efi_enabled(EFI_MEMMAP)) efi_reserve_boot_services(); /* preallocate 4k for mptable mpc */ early_reserve_e820_mpc_new(); #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION setup_bios_corruption_check(); #endif #ifdef CONFIG_X86_32 printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n", (max_pfn_mapped<<PAGE_SHIFT) - 1); #endif reserve_real_mode(); trim_platform_memory_ranges(); trim_low_memory_range(); init_mem_mapping(); early_trap_pf_init(); setup_real_mode(); memblock_set_current_limit(get_max_mapped()); /* * NOTE: On x86-32, only from this point on, fixmaps are ready for use. */ #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT if (init_ohci1394_dma_early) init_ohci1394_dma_on_all_controllers(); #endif /* Allocate bigger log buffer */ setup_log_buf(1); reserve_initrd(); #if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD) acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start); #endif vsmp_init(); io_delay_init(); /* * Parse the ACPI tables for possible boot-time SMP configuration. */ acpi_boot_table_init(); early_acpi_boot_init(); initmem_init(); dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); /* * Reserve memory for crash kernel after SRAT is parsed so that it * won't consume hotpluggable memory. */ reserve_crashkernel(); memblock_find_dma_reserve(); #ifdef CONFIG_KVM_GUEST kvmclock_init(); #endif x86_init.paging.pagetable_init(); kasan_init(); if (boot_cpu_data.cpuid_level >= 0) { /* A CPU has %cr4 if and only if it has CPUID */ mmu_cr4_features = __read_cr4(); if (trampoline_cr4_features) *trampoline_cr4_features = mmu_cr4_features; } #ifdef CONFIG_X86_32 /* sync back kernel address range */ clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, swapper_pg_dir + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS); #endif tboot_probe(); #ifdef CONFIG_X86_64 map_vsyscall(); #endif generic_apic_probe(); early_quirks(); /* * Read APIC and some other early information from ACPI tables. 
*/ acpi_boot_init(); sfi_init(); x86_dtb_init(); /* * get boot-time SMP configuration: */ if (smp_found_config) get_smp_config(); prefill_possible_map(); init_cpu_to_node(); init_apic_mappings(); if (x86_io_apic_ops.init) x86_io_apic_ops.init(); kvm_guest_init(); e820_reserve_resources(); e820_mark_nosave_regions(max_low_pfn); x86_init.resources.reserve_resources(); e820_setup_gap(); #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) conswitchp = &vga_con; #elif defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; #endif #endif x86_init.oem.banner(); x86_init.timers.wallclock_init(); mcheck_init(); arch_init_ideal_nops(); register_refined_jiffies(CLOCK_TICK_RATE); #ifdef CONFIG_EFI if (efi_enabled(EFI_BOOT)) efi_apply_memmap_quirks(); #endif } #ifdef CONFIG_X86_32 static struct resource video_ram_resource = { .name = "Video RAM area", .start = 0xa0000, .end = 0xbffff, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; void __init i386_reserve_resources(void) { request_resource(&iomem_resource, &video_ram_resource); reserve_standard_io_resources(); } #endif /* CONFIG_X86_32 */ static struct notifier_block kernel_offset_notifier = { .notifier_call = dump_kernel_offset }; static int __init register_kernel_offset_dumper(void) { atomic_notifier_chain_register(&panic_notifier_list, &kernel_offset_notifier); return 0; } __initcall(register_kernel_offset_dumper);
gpl-2.0
dezelin/kvm
arch/arm/mach-omap1/io.c
130
3887
/* * linux/arch/arm/mach-omap1/io.c * * OMAP1 I/O mapping code * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <asm/tlb.h> #include <asm/mach/map.h> #include <plat/mux.h> #include <plat/tc.h> #include <plat/dma.h> #include "iomap.h" #include "common.h" #include "clock.h" /* * The machine specific code may provide the extra mapping besides the * default mapping provided here. */ static struct map_desc omap_io_desc[] __initdata = { { .virtual = OMAP1_IO_VIRT, .pfn = __phys_to_pfn(OMAP1_IO_PHYS), .length = OMAP1_IO_SIZE, .type = MT_DEVICE } }; #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850) static struct map_desc omap7xx_io_desc[] __initdata = { { .virtual = OMAP7XX_DSP_BASE, .pfn = __phys_to_pfn(OMAP7XX_DSP_START), .length = OMAP7XX_DSP_SIZE, .type = MT_DEVICE }, { .virtual = OMAP7XX_DSPREG_BASE, .pfn = __phys_to_pfn(OMAP7XX_DSPREG_START), .length = OMAP7XX_DSPREG_SIZE, .type = MT_DEVICE } }; #endif #ifdef CONFIG_ARCH_OMAP15XX static struct map_desc omap1510_io_desc[] __initdata = { { .virtual = OMAP1510_DSP_BASE, .pfn = __phys_to_pfn(OMAP1510_DSP_START), .length = OMAP1510_DSP_SIZE, .type = MT_DEVICE }, { .virtual = OMAP1510_DSPREG_BASE, .pfn = __phys_to_pfn(OMAP1510_DSPREG_START), .length = OMAP1510_DSPREG_SIZE, .type = MT_DEVICE } }; #endif #if defined(CONFIG_ARCH_OMAP16XX) static struct map_desc omap16xx_io_desc[] __initdata = { { .virtual = OMAP16XX_DSP_BASE, .pfn = __phys_to_pfn(OMAP16XX_DSP_START), .length = OMAP16XX_DSP_SIZE, .type = MT_DEVICE }, { .virtual = OMAP16XX_DSPREG_BASE, .pfn = __phys_to_pfn(OMAP16XX_DSPREG_START), .length = OMAP16XX_DSPREG_SIZE, .type = MT_DEVICE } }; #endif /* * Maps common IO regions for omap1 */ static void __init omap1_map_common_io(void) { 
iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc)); } #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850) void __init omap7xx_map_io(void) { omap1_map_common_io(); iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc)); } #endif #ifdef CONFIG_ARCH_OMAP15XX void __init omap15xx_map_io(void) { omap1_map_common_io(); iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc)); } #endif #if defined(CONFIG_ARCH_OMAP16XX) void __init omap16xx_map_io(void) { omap1_map_common_io(); iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc)); } #endif /* * Common low-level hardware init for omap1. */ void __init omap1_init_early(void) { omap_check_revision(); /* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort * on a Posted Write in the TIPB Bridge". */ omap_writew(0x0, MPU_PUBLIC_TIPB_CNTL); omap_writew(0x0, MPU_PRIVATE_TIPB_CNTL); /* Must init clocks early to assure that timer interrupt works */ omap1_clk_init(); omap1_mux_init(); omap_init_consistent_dma_size(); } void __init omap1_init_late(void) { omap_serial_wakeup_init(); } /* * NOTE: Please use ioremap + __raw_read/write where possible instead of these */ u8 omap_readb(u32 pa) { return __raw_readb(OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_readb); u16 omap_readw(u32 pa) { return __raw_readw(OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_readw); u32 omap_readl(u32 pa) { return __raw_readl(OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_readl); void omap_writeb(u8 v, u32 pa) { __raw_writeb(v, OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_writeb); void omap_writew(u16 v, u32 pa) { __raw_writew(v, OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_writew); void omap_writel(u32 v, u32 pa) { __raw_writel(v, OMAP1_IO_ADDRESS(pa)); } EXPORT_SYMBOL(omap_writel);
gpl-2.0
ionux/linux
fs/f2fs/namei.c
130
24909
/* * fs/f2fs/namei.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/ctype.h> #include <linux/dcache.h> #include <linux/namei.h> #include "f2fs.h" #include "node.h" #include "xattr.h" #include "acl.h" #include <trace/events/f2fs.h> static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); nid_t ino; struct inode *inode; bool nid_free = false; int err; inode = new_inode(dir->i_sb); if (!inode) return ERR_PTR(-ENOMEM); f2fs_lock_op(sbi); if (!alloc_nid(sbi, &ino)) { f2fs_unlock_op(sbi); err = -ENOSPC; goto fail; } f2fs_unlock_op(sbi); inode_init_owner(inode, dir, mode); inode->i_ino = ino; inode->i_blocks = 0; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_generation = sbi->s_next_generation++; err = insert_inode_locked(inode); if (err) { err = -EINVAL; nid_free = true; goto fail; } /* If the directory encrypted, then we should encrypt the inode. 
*/ if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) f2fs_set_encrypted_inode(inode); if (f2fs_may_inline_data(inode)) set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); if (f2fs_may_inline_dentry(inode)) set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY); f2fs_init_extent_tree(inode, NULL); stat_inc_inline_xattr(inode); stat_inc_inline_inode(inode); stat_inc_inline_dir(inode); trace_f2fs_new_inode(inode, 0); mark_inode_dirty(inode); return inode; fail: trace_f2fs_new_inode(inode, err); make_bad_inode(inode); if (nid_free) set_inode_flag(F2FS_I(inode), FI_FREE_NID); iput(inode); return ERR_PTR(err); } static int is_multimedia_file(const unsigned char *s, const char *sub) { size_t slen = strlen(s); size_t sublen = strlen(sub); /* * filename format of multimedia file should be defined as: * "filename + '.' + extension". */ if (slen < sublen + 2) return 0; if (s[slen - sublen - 1] != '.') return 0; return !strncasecmp(s + slen - sublen, sub, sublen); } /* * Set multimedia files as cold files for hot/cold data separation */ static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode, const unsigned char *name) { int i; __u8 (*extlist)[8] = sbi->raw_super->extension_list; int count = le32_to_cpu(sbi->raw_super->extension_count); for (i = 0; i < count; i++) { if (is_multimedia_file(name, extlist[i])) { file_set_cold(inode); break; } } } static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; nid_t ino = 0; int err; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); if (!test_opt(sbi, DISABLE_EXT_IDENTIFY)) set_cold_files(sbi, inode, dentry->d_name.name); inode->i_op = &f2fs_file_inode_operations; inode->i_fop = &f2fs_file_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; ino = inode->i_ino; f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out; f2fs_unlock_op(sbi); 
alloc_nid_done(sbi, ino); d_instantiate(dentry, inode); unlock_new_inode(inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); return 0; out: handle_failed_inode(inode); return err; } static int f2fs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); struct f2fs_sb_info *sbi = F2FS_I_SB(dir); int err; if (f2fs_encrypted_inode(dir) && !f2fs_is_child_context_consistent_with_parent(dir, inode)) return -EPERM; f2fs_balance_fs(sbi); inode->i_ctime = CURRENT_TIME; ihold(inode); set_inode_flag(F2FS_I(inode), FI_INC_LINK); f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out; f2fs_unlock_op(sbi); d_instantiate(dentry, inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); return 0; out: clear_inode_flag(F2FS_I(inode), FI_INC_LINK); iput(inode); f2fs_unlock_op(sbi); return err; } struct dentry *f2fs_get_parent(struct dentry *child) { struct qstr dotdot = QSTR_INIT("..", 2); unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot); if (!ino) return ERR_PTR(-ENOENT); return d_obtain_alias(f2fs_iget(d_inode(child)->i_sb, ino)); } static int __recover_dot_dentries(struct inode *dir, nid_t pino) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct qstr dot = QSTR_INIT(".", 1); struct qstr dotdot = QSTR_INIT("..", 2); struct f2fs_dir_entry *de; struct page *page; int err = 0; f2fs_lock_op(sbi); de = f2fs_find_entry(dir, &dot, &page); if (de) { f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); } else { err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR); if (err) goto out; } de = f2fs_find_entry(dir, &dotdot, &page); if (de) { f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); } else { err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR); } out: if (!err) { clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS); mark_inode_dirty(dir); } f2fs_unlock_op(sbi); return err; } static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct 
inode *inode = NULL; struct f2fs_dir_entry *de; struct page *page; nid_t ino; int err = 0; if (dentry->d_name.len > F2FS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) return d_splice_alias(inode, dentry); ino = le32_to_cpu(de->ino); f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); inode = f2fs_iget(dir->i_sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (f2fs_has_inline_dots(inode)) { err = __recover_dot_dentries(inode, dir->i_ino); if (err) goto err_out; } return d_splice_alias(inode, dentry); err_out: iget_failed(inode); return ERR_PTR(err); } static int f2fs_unlink(struct inode *dir, struct dentry *dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode = d_inode(dentry); struct f2fs_dir_entry *de; struct page *page; int err = -ENOENT; trace_f2fs_unlink_enter(dir, dentry); f2fs_balance_fs(sbi); de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) goto fail; f2fs_lock_op(sbi); err = acquire_orphan_inode(sbi); if (err) { f2fs_unlock_op(sbi); f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); goto fail; } f2fs_delete_entry(de, page, dir, inode); f2fs_unlock_op(sbi); /* In order to evict this inode, we set it dirty */ mark_inode_dirty(inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); fail: trace_f2fs_unlink_exit(inode, err); return err; } static const char *f2fs_follow_link(struct dentry *dentry, void **cookie) { const char *link = page_follow_link_light(dentry, cookie); if (!IS_ERR(link) && !*link) { /* this is broken symlink case */ page_put_link(NULL, *cookie); link = ERR_PTR(-ENOENT); } return link; } static int f2fs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; size_t len = strlen(symname); size_t p_len; char *p_str; struct f2fs_str disk_link = FSTR_INIT(NULL, 0); struct f2fs_encrypted_symlink_data *sd = NULL; int err; if (len > dir->i_sb->s_blocksize) return 
-ENAMETOOLONG; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO); if (IS_ERR(inode)) return PTR_ERR(inode); if (f2fs_encrypted_inode(inode)) inode->i_op = &f2fs_encrypted_symlink_inode_operations; else inode->i_op = &f2fs_symlink_inode_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out; f2fs_unlock_op(sbi); alloc_nid_done(sbi, inode->i_ino); if (f2fs_encrypted_inode(dir)) { struct qstr istr = QSTR_INIT(symname, len); err = f2fs_get_encryption_info(inode); if (err) goto err_out; err = f2fs_fname_crypto_alloc_buffer(inode, len, &disk_link); if (err) goto err_out; err = f2fs_fname_usr_to_disk(inode, &istr, &disk_link); if (err < 0) goto err_out; p_len = encrypted_symlink_data_len(disk_link.len) + 1; if (p_len > dir->i_sb->s_blocksize) { err = -ENAMETOOLONG; goto err_out; } sd = kzalloc(p_len, GFP_NOFS); if (!sd) { err = -ENOMEM; goto err_out; } memcpy(sd->encrypted_path, disk_link.name, disk_link.len); sd->len = cpu_to_le16(disk_link.len); p_str = (char *)sd; } else { p_len = len + 1; p_str = (char *)symname; } err = page_symlink(inode, p_str, p_len); err_out: d_instantiate(dentry, inode); unlock_new_inode(inode); /* * Let's flush symlink data in order to avoid broken symlink as much as * possible. Nevertheless, fsyncing is the best way, but there is no * way to get a file descriptor in order to flush that. * * Note that, it needs to do dir->fsync to make this recoverable. * If the symlink path is stored into inline_data, there is no * performance regression. 
*/ if (!err) { filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); } else { f2fs_unlink(dir, dentry); } kfree(sd); f2fs_fname_crypto_free_buffer(&disk_link); return err; out: handle_failed_inode(inode); return err; } static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, S_IFDIR | mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO); set_inode_flag(F2FS_I(inode), FI_INC_LINK); f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out_fail; f2fs_unlock_op(sbi); alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); return 0; out_fail: clear_inode_flag(F2FS_I(inode), FI_INC_LINK); handle_failed_inode(inode); return err; } static int f2fs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); if (f2fs_empty_dir(inode)) return f2fs_unlink(dir, dentry); return -ENOTEMPTY; } static int f2fs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err = 0; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &f2fs_special_inode_operations; f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) goto out; f2fs_unlock_op(sbi); alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); if (IS_DIRSYNC(dir)) f2fs_sync_fs(sbi->sb, 1); return 0; out: handle_failed_inode(inode); return err; } static int __f2fs_tmpfile(struct inode *dir, struct 
dentry *dentry, umode_t mode, struct inode **whiteout) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err; if (!whiteout) f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); if (whiteout) { init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); inode->i_op = &f2fs_special_inode_operations; } else { inode->i_op = &f2fs_file_inode_operations; inode->i_fop = &f2fs_file_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; } f2fs_lock_op(sbi); err = acquire_orphan_inode(sbi); if (err) goto out; err = f2fs_do_tmpfile(inode, dir); if (err) goto release_out; /* * add this non-linked tmpfile to orphan list, in this way we could * remove all unused data of tmpfile after abnormal power-off. */ add_orphan_inode(sbi, inode->i_ino); f2fs_unlock_op(sbi); alloc_nid_done(sbi, inode->i_ino); if (whiteout) { inode_dec_link_count(inode); *whiteout = inode; } else { d_tmpfile(dentry, inode); } unlock_new_inode(inode); return 0; release_out: release_orphan_inode(sbi); out: handle_failed_inode(inode); return err; } static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) { if (f2fs_encrypted_inode(dir)) { int err = f2fs_get_encryption_info(dir); if (err) return err; } return __f2fs_tmpfile(dir, dentry, mode, NULL); } static int f2fs_create_whiteout(struct inode *dir, struct inode **whiteout) { return __f2fs_tmpfile(dir, NULL, S_IFCHR | WHITEOUT_MODE, whiteout); } static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir); struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); struct inode *whiteout = NULL; struct page *old_dir_page; struct page *old_page, *new_page = NULL; struct f2fs_dir_entry *old_dir_entry = NULL; struct f2fs_dir_entry *old_entry; struct f2fs_dir_entry *new_entry; int err = -ENOENT; if ((old_dir != 
new_dir) && f2fs_encrypted_inode(new_dir) && !f2fs_is_child_context_consistent_with_parent(new_dir, old_inode)) { err = -EPERM; goto out; } f2fs_balance_fs(sbi); old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) goto out; if (S_ISDIR(old_inode->i_mode)) { err = -EIO; old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page); if (!old_dir_entry) goto out_old; } if (flags & RENAME_WHITEOUT) { err = f2fs_create_whiteout(old_dir, &whiteout); if (err) goto out_dir; } if (new_inode) { err = -ENOTEMPTY; if (old_dir_entry && !f2fs_empty_dir(new_inode)) goto out_whiteout; err = -ENOENT; new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_entry) goto out_whiteout; f2fs_lock_op(sbi); err = acquire_orphan_inode(sbi); if (err) goto put_out_dir; if (update_dent_inode(old_inode, new_inode, &new_dentry->d_name)) { release_orphan_inode(sbi); goto put_out_dir; } f2fs_set_link(new_dir, new_entry, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME; down_write(&F2FS_I(new_inode)->i_sem); if (old_dir_entry) drop_nlink(new_inode); drop_nlink(new_inode); up_write(&F2FS_I(new_inode)->i_sem); mark_inode_dirty(new_inode); if (!new_inode->i_nlink) add_orphan_inode(sbi, new_inode->i_ino); else release_orphan_inode(sbi); update_inode_page(old_inode); update_inode_page(new_inode); } else { f2fs_lock_op(sbi); err = f2fs_add_link(new_dentry, old_inode); if (err) { f2fs_unlock_op(sbi); goto out_whiteout; } if (old_dir_entry) { inc_nlink(new_dir); update_inode_page(new_dir); } } down_write(&F2FS_I(old_inode)->i_sem); file_lost_pino(old_inode); if (new_inode && file_enc_name(new_inode)) file_set_enc_name(old_inode); up_write(&F2FS_I(old_inode)->i_sem); old_inode->i_ctime = CURRENT_TIME; mark_inode_dirty(old_inode); f2fs_delete_entry(old_entry, old_page, old_dir, NULL); if (whiteout) { whiteout->i_state |= I_LINKABLE; set_inode_flag(F2FS_I(whiteout), FI_INC_LINK); err = f2fs_add_link(old_dentry, whiteout); if (err) goto put_out_dir; 
whiteout->i_state &= ~I_LINKABLE; iput(whiteout); } if (old_dir_entry) { if (old_dir != new_dir && !whiteout) { f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir); update_inode_page(old_inode); } else { f2fs_dentry_kunmap(old_inode, old_dir_page); f2fs_put_page(old_dir_page, 0); } drop_nlink(old_dir); mark_inode_dirty(old_dir); update_inode_page(old_dir); } f2fs_unlock_op(sbi); if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) f2fs_sync_fs(sbi->sb, 1); return 0; put_out_dir: f2fs_unlock_op(sbi); if (new_page) { f2fs_dentry_kunmap(new_dir, new_page); f2fs_put_page(new_page, 0); } out_whiteout: if (whiteout) iput(whiteout); out_dir: if (old_dir_entry) { f2fs_dentry_kunmap(old_inode, old_dir_page); f2fs_put_page(old_dir_page, 0); } out_old: f2fs_dentry_kunmap(old_dir, old_page); f2fs_put_page(old_page, 0); out: return err; } static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir); struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); struct page *old_dir_page, *new_dir_page; struct page *old_page, *new_page; struct f2fs_dir_entry *old_dir_entry = NULL, *new_dir_entry = NULL; struct f2fs_dir_entry *old_entry, *new_entry; int old_nlink = 0, new_nlink = 0; int err = -ENOENT; if ((f2fs_encrypted_inode(old_dir) || f2fs_encrypted_inode(new_dir)) && (old_dir != new_dir) && (!f2fs_is_child_context_consistent_with_parent(new_dir, old_inode) || !f2fs_is_child_context_consistent_with_parent(old_dir, new_inode))) return -EPERM; f2fs_balance_fs(sbi); old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) goto out; new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_entry) goto out_old; /* prepare for updating ".." 
directory entry info later */ if (old_dir != new_dir) { if (S_ISDIR(old_inode->i_mode)) { err = -EIO; old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page); if (!old_dir_entry) goto out_new; } if (S_ISDIR(new_inode->i_mode)) { err = -EIO; new_dir_entry = f2fs_parent_dir(new_inode, &new_dir_page); if (!new_dir_entry) goto out_old_dir; } } /* * If cross rename between file and directory those are not * in the same directory, we will inc nlink of file's parent * later, so we should check upper boundary of its nlink. */ if ((!old_dir_entry || !new_dir_entry) && old_dir_entry != new_dir_entry) { old_nlink = old_dir_entry ? -1 : 1; new_nlink = -old_nlink; err = -EMLINK; if ((old_nlink > 0 && old_inode->i_nlink >= F2FS_LINK_MAX) || (new_nlink > 0 && new_inode->i_nlink >= F2FS_LINK_MAX)) goto out_new_dir; } f2fs_lock_op(sbi); err = update_dent_inode(old_inode, new_inode, &new_dentry->d_name); if (err) goto out_unlock; if (file_enc_name(new_inode)) file_set_enc_name(old_inode); err = update_dent_inode(new_inode, old_inode, &old_dentry->d_name); if (err) goto out_undo; if (file_enc_name(old_inode)) file_set_enc_name(new_inode); /* update ".." directory entry info of old dentry */ if (old_dir_entry) f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir); /* update ".." 
directory entry info of new dentry */ if (new_dir_entry) f2fs_set_link(new_inode, new_dir_entry, new_dir_page, old_dir); /* update directory entry info of old dir inode */ f2fs_set_link(old_dir, old_entry, old_page, new_inode); down_write(&F2FS_I(old_inode)->i_sem); file_lost_pino(old_inode); up_write(&F2FS_I(old_inode)->i_sem); update_inode_page(old_inode); old_dir->i_ctime = CURRENT_TIME; if (old_nlink) { down_write(&F2FS_I(old_dir)->i_sem); if (old_nlink < 0) drop_nlink(old_dir); else inc_nlink(old_dir); up_write(&F2FS_I(old_dir)->i_sem); } mark_inode_dirty(old_dir); update_inode_page(old_dir); /* update directory entry info of new dir inode */ f2fs_set_link(new_dir, new_entry, new_page, old_inode); down_write(&F2FS_I(new_inode)->i_sem); file_lost_pino(new_inode); up_write(&F2FS_I(new_inode)->i_sem); update_inode_page(new_inode); new_dir->i_ctime = CURRENT_TIME; if (new_nlink) { down_write(&F2FS_I(new_dir)->i_sem); if (new_nlink < 0) drop_nlink(new_dir); else inc_nlink(new_dir); up_write(&F2FS_I(new_dir)->i_sem); } mark_inode_dirty(new_dir); update_inode_page(new_dir); f2fs_unlock_op(sbi); if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir)) f2fs_sync_fs(sbi->sb, 1); return 0; out_undo: /* * Still we may fail to recover name info of f2fs_inode here * Drop it, once its name is set as encrypted */ update_dent_inode(old_inode, old_inode, &old_dentry->d_name); out_unlock: f2fs_unlock_op(sbi); out_new_dir: if (new_dir_entry) { f2fs_dentry_kunmap(new_inode, new_dir_page); f2fs_put_page(new_dir_page, 0); } out_old_dir: if (old_dir_entry) { f2fs_dentry_kunmap(old_inode, old_dir_page); f2fs_put_page(old_dir_page, 0); } out_new: f2fs_dentry_kunmap(new_dir, new_page); f2fs_put_page(new_page, 0); out_old: f2fs_dentry_kunmap(old_dir, old_page); f2fs_put_page(old_page, 0); out: return err; } static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { if (flags & ~(RENAME_NOREPLACE | 
RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if (flags & RENAME_EXCHANGE) { return f2fs_cross_rename(old_dir, old_dentry, new_dir, new_dentry); } /* * VFS has already handled the new dentry existence case, * here, we just deal with "RENAME_NOREPLACE" as regular rename. */ return f2fs_rename(old_dir, old_dentry, new_dir, new_dentry, flags); } #ifdef CONFIG_F2FS_FS_ENCRYPTION static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cookie) { struct page *cpage = NULL; char *caddr, *paddr = NULL; struct f2fs_str cstr; struct f2fs_str pstr = FSTR_INIT(NULL, 0); struct inode *inode = d_inode(dentry); struct f2fs_encrypted_symlink_data *sd; loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1); u32 max_size = inode->i_sb->s_blocksize; int res; res = f2fs_get_encryption_info(inode); if (res) return ERR_PTR(res); cpage = read_mapping_page(inode->i_mapping, 0, NULL); if (IS_ERR(cpage)) return ERR_CAST(cpage); caddr = kmap(cpage); caddr[size] = 0; /* Symlink is encrypted */ sd = (struct f2fs_encrypted_symlink_data *)caddr; cstr.len = le16_to_cpu(sd->len); cstr.name = kmalloc(cstr.len, GFP_NOFS); if (!cstr.name) { res = -ENOMEM; goto errout; } memcpy(cstr.name, sd->encrypted_path, cstr.len); /* this is broken symlink case */ if (cstr.name[0] == 0 && cstr.len == 0) { res = -ENOENT; goto errout; } if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) > max_size) { /* Symlink data on the disk is corrupted */ res = -EIO; goto errout; } res = f2fs_fname_crypto_alloc_buffer(inode, cstr.len, &pstr); if (res) goto errout; res = f2fs_fname_disk_to_usr(inode, NULL, &cstr, &pstr); if (res < 0) goto errout; kfree(cstr.name); paddr = pstr.name; /* Null-terminate the name */ paddr[res] = '\0'; kunmap(cpage); page_cache_release(cpage); return *cookie = paddr; errout: kfree(cstr.name); f2fs_fname_crypto_free_buffer(&pstr); kunmap(cpage); page_cache_release(cpage); return ERR_PTR(res); } const struct inode_operations 
f2fs_encrypted_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = f2fs_encrypted_follow_link, .put_link = kfree_put_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, }; #endif const struct inode_operations f2fs_dir_inode_operations = { .create = f2fs_create, .lookup = f2fs_lookup, .link = f2fs_link, .unlink = f2fs_unlink, .symlink = f2fs_symlink, .mkdir = f2fs_mkdir, .rmdir = f2fs_rmdir, .mknod = f2fs_mknod, .rename2 = f2fs_rename2, .tmpfile = f2fs_tmpfile, .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif }; const struct inode_operations f2fs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = f2fs_follow_link, .put_link = page_put_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif }; const struct inode_operations f2fs_special_inode_operations = { .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif };
gpl-2.0
ankitC/RK-kernel
drivers/staging/iio/accel/adis16209_core.c
386
13870
/* * ADIS16209 Programmable Digital Vibration Sensor driver * * Copyright 2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/delay.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/list.h> #include "../iio.h" #include "../sysfs.h" #include "../ring_generic.h" #include "accel.h" #include "inclinometer.h" #include "../adc/adc.h" #include "adis16209.h" #define DRIVER_NAME "adis16209" /** * adis16209_spi_write_reg_8() - write single byte to a register * @indio_dev: iio device associated with actual device * @reg_address: the address of the register to be written * @val: the value to write **/ static int adis16209_spi_write_reg_8(struct iio_dev *indio_dev, u8 reg_address, u8 val) { int ret; struct adis16209_state *st = iio_priv(indio_dev); mutex_lock(&st->buf_lock); st->tx[0] = ADIS16209_WRITE_REG(reg_address); st->tx[1] = val; ret = spi_write(st->us, st->tx, 2); mutex_unlock(&st->buf_lock); return ret; } /** * adis16209_spi_write_reg_16() - write 2 bytes to a pair of registers * @indio_dev: iio device associated actual device * @reg_address: the address of the lower of the two registers. Second register * is assumed to have address one greater. 
* @val: value to be written **/ static int adis16209_spi_write_reg_16(struct iio_dev *indio_dev, u8 lower_reg_address, u16 value) { int ret; struct spi_message msg; struct adis16209_state *st = iio_priv(indio_dev); struct spi_transfer xfers[] = { { .tx_buf = st->tx, .bits_per_word = 8, .len = 2, .cs_change = 1, .delay_usecs = 30, }, { .tx_buf = st->tx + 2, .bits_per_word = 8, .len = 2, .delay_usecs = 30, }, }; mutex_lock(&st->buf_lock); st->tx[0] = ADIS16209_WRITE_REG(lower_reg_address); st->tx[1] = value & 0xFF; st->tx[2] = ADIS16209_WRITE_REG(lower_reg_address + 1); st->tx[3] = (value >> 8) & 0xFF; spi_message_init(&msg); spi_message_add_tail(&xfers[0], &msg); spi_message_add_tail(&xfers[1], &msg); ret = spi_sync(st->us, &msg); mutex_unlock(&st->buf_lock); return ret; } /** * adis16209_spi_read_reg_16() - read 2 bytes from a 16-bit register * @indio_dev: iio device associated with device * @reg_address: the address of the lower of the two registers. Second register * is assumed to have address one greater. 
* @val: somewhere to pass back the value read **/ static int adis16209_spi_read_reg_16(struct iio_dev *indio_dev, u8 lower_reg_address, u16 *val) { struct spi_message msg; struct adis16209_state *st = iio_priv(indio_dev); int ret; struct spi_transfer xfers[] = { { .tx_buf = st->tx, .bits_per_word = 8, .len = 2, .cs_change = 1, .delay_usecs = 30, }, { .rx_buf = st->rx, .bits_per_word = 8, .len = 2, .delay_usecs = 30, }, }; mutex_lock(&st->buf_lock); st->tx[0] = ADIS16209_READ_REG(lower_reg_address); st->tx[1] = 0; spi_message_init(&msg); spi_message_add_tail(&xfers[0], &msg); spi_message_add_tail(&xfers[1], &msg); ret = spi_sync(st->us, &msg); if (ret) { dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X", lower_reg_address); goto error_ret; } *val = (st->rx[0] << 8) | st->rx[1]; error_ret: mutex_unlock(&st->buf_lock); return ret; } static int adis16209_reset(struct iio_dev *indio_dev) { int ret; ret = adis16209_spi_write_reg_8(indio_dev, ADIS16209_GLOB_CMD, ADIS16209_GLOB_CMD_SW_RESET); if (ret) dev_err(&indio_dev->dev, "problem resetting device"); return ret; } static ssize_t adis16209_write_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); if (len < 1) return -EINVAL; switch (buf[0]) { case '1': case 'y': case 'Y': return adis16209_reset(indio_dev); } return -EINVAL; } int adis16209_set_irq(struct iio_dev *indio_dev, bool enable) { int ret = 0; u16 msc; ret = adis16209_spi_read_reg_16(indio_dev, ADIS16209_MSC_CTRL, &msc); if (ret) goto error_ret; msc |= ADIS16209_MSC_CTRL_ACTIVE_HIGH; msc &= ~ADIS16209_MSC_CTRL_DATA_RDY_DIO2; if (enable) msc |= ADIS16209_MSC_CTRL_DATA_RDY_EN; else msc &= ~ADIS16209_MSC_CTRL_DATA_RDY_EN; ret = adis16209_spi_write_reg_16(indio_dev, ADIS16209_MSC_CTRL, msc); error_ret: return ret; } static int adis16209_check_status(struct iio_dev *indio_dev) { u16 status; int ret; ret = adis16209_spi_read_reg_16(indio_dev, ADIS16209_DIAG_STAT, 
&status); if (ret < 0) { dev_err(&indio_dev->dev, "Reading status failed\n"); goto error_ret; } ret = status & 0x1F; if (status & ADIS16209_DIAG_STAT_SELFTEST_FAIL) dev_err(&indio_dev->dev, "Self test failure\n"); if (status & ADIS16209_DIAG_STAT_SPI_FAIL) dev_err(&indio_dev->dev, "SPI failure\n"); if (status & ADIS16209_DIAG_STAT_FLASH_UPT) dev_err(&indio_dev->dev, "Flash update failed\n"); if (status & ADIS16209_DIAG_STAT_POWER_HIGH) dev_err(&indio_dev->dev, "Power supply above 3.625V\n"); if (status & ADIS16209_DIAG_STAT_POWER_LOW) dev_err(&indio_dev->dev, "Power supply below 3.15V\n"); error_ret: return ret; } static int adis16209_self_test(struct iio_dev *indio_dev) { int ret; ret = adis16209_spi_write_reg_16(indio_dev, ADIS16209_MSC_CTRL, ADIS16209_MSC_CTRL_SELF_TEST_EN); if (ret) { dev_err(&indio_dev->dev, "problem starting self test"); goto err_ret; } adis16209_check_status(indio_dev); err_ret: return ret; } static int adis16209_initial_setup(struct iio_dev *indio_dev) { int ret; /* Disable IRQ */ ret = adis16209_set_irq(indio_dev, false); if (ret) { dev_err(&indio_dev->dev, "disable irq failed"); goto err_ret; } /* Do self test */ ret = adis16209_self_test(indio_dev); if (ret) { dev_err(&indio_dev->dev, "self test failure"); goto err_ret; } /* Read status register to check the result */ ret = adis16209_check_status(indio_dev); if (ret) { adis16209_reset(indio_dev); dev_err(&indio_dev->dev, "device not playing ball -> reset"); msleep(ADIS16209_STARTUP_DELAY); ret = adis16209_check_status(indio_dev); if (ret) { dev_err(&indio_dev->dev, "giving up"); goto err_ret; } } err_ret: return ret; } enum adis16209_chan { in_supply, temp, accel_x, accel_y, incli_x, incli_y, in_aux, rot, }; static const u8 adis16209_addresses[8][2] = { [in_supply] = { ADIS16209_SUPPLY_OUT }, [in_aux] = { ADIS16209_AUX_ADC }, [accel_x] = { ADIS16209_XACCL_OUT, ADIS16209_XACCL_NULL }, [accel_y] = { ADIS16209_YACCL_OUT, ADIS16209_YACCL_NULL }, [incli_x] = { ADIS16209_XINCL_OUT, 
ADIS16209_XINCL_NULL }, [incli_y] = { ADIS16209_YINCL_OUT, ADIS16209_YINCL_NULL }, [rot] = { ADIS16209_ROT_OUT }, [temp] = { ADIS16209_TEMP_OUT }, }; static int adis16209_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { int bits; s16 val16; u8 addr; switch (mask) { case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE): switch (chan->type) { case IIO_ACCEL: case IIO_INCLI: bits = 14; break; default: return -EINVAL; }; val16 = val & ((1 << bits) - 1); addr = adis16209_addresses[chan->address][1]; return adis16209_spi_write_reg_16(indio_dev, addr, val16); } return -EINVAL; } static int adis16209_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { int ret; int bits; u8 addr; s16 val16; switch (mask) { case 0: mutex_lock(&indio_dev->mlock); addr = adis16209_addresses[chan->address][0]; ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16); if (ret) { mutex_unlock(&indio_dev->mlock); return ret; } if (val16 & ADIS16209_ERROR_ACTIVE) { ret = adis16209_check_status(indio_dev); if (ret) { mutex_unlock(&indio_dev->mlock); return ret; } } val16 = val16 & ((1 << chan->scan_type.realbits) - 1); if (chan->scan_type.sign == 's') val16 = (s16)(val16 << (16 - chan->scan_type.realbits)) >> (16 - chan->scan_type.realbits); *val = val16; mutex_unlock(&indio_dev->mlock); return IIO_VAL_INT; case (1 << IIO_CHAN_INFO_SCALE_SEPARATE): case (1 << IIO_CHAN_INFO_SCALE_SHARED): switch (chan->type) { case IIO_IN: *val = 0; if (chan->channel == 0) *val2 = 305180; else *val2 = 610500; return IIO_VAL_INT_PLUS_MICRO; case IIO_TEMP: *val = 0; *val2 = -470000; return IIO_VAL_INT_PLUS_MICRO; case IIO_ACCEL: *val = 0; *val2 = 2394; return IIO_VAL_INT_PLUS_MICRO; case IIO_INCLI: *val = 0; *val2 = 436; return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; } break; case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE): *val = 25; return IIO_VAL_INT; case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE): switch (chan->type) { 
case IIO_ACCEL: bits = 14; break; default: return -EINVAL; }; mutex_lock(&indio_dev->mlock); addr = adis16209_addresses[chan->address][1]; ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16); if (ret) { mutex_unlock(&indio_dev->mlock); return ret; } val16 &= (1 << bits) - 1; val16 = (s16)(val16 << (16 - bits)) >> (16 - bits); *val = val16; mutex_unlock(&indio_dev->mlock); return IIO_VAL_INT; } return -EINVAL; } static struct iio_chan_spec adis16209_channels[] = { IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0, (1 << IIO_CHAN_INFO_SCALE_SEPARATE), in_supply, ADIS16209_SCAN_SUPPLY, IIO_ST('u', 14, 16, 0), 0), IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0, (1 << IIO_CHAN_INFO_SCALE_SEPARATE) | (1 << IIO_CHAN_INFO_OFFSET_SEPARATE), temp, ADIS16209_SCAN_TEMP, IIO_ST('u', 12, 16, 0), 0), IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X, (1 << IIO_CHAN_INFO_SCALE_SHARED) | (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE), accel_x, ADIS16209_SCAN_ACC_X, IIO_ST('s', 14, 16, 0), 0), IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y, (1 << IIO_CHAN_INFO_SCALE_SHARED) | (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE), accel_y, ADIS16209_SCAN_ACC_Y, IIO_ST('s', 14, 16, 0), 0), IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0, (1 << IIO_CHAN_INFO_SCALE_SEPARATE), in_aux, ADIS16209_SCAN_AUX_ADC, IIO_ST('u', 12, 16, 0), 0), IIO_CHAN(IIO_INCLI, 0, 1, 0, NULL, 0, IIO_MOD_X, (1 << IIO_CHAN_INFO_SCALE_SHARED), incli_x, ADIS16209_SCAN_INCLI_X, IIO_ST('s', 14, 16, 0), 0), IIO_CHAN(IIO_INCLI, 0, 1, 0, NULL, 0, IIO_MOD_Y, (1 << IIO_CHAN_INFO_SCALE_SHARED), incli_y, ADIS16209_SCAN_INCLI_Y, IIO_ST('s', 14, 16, 0), 0), IIO_CHAN(IIO_ROT, 0, 1, 0, NULL, 0, IIO_MOD_X, 0, rot, ADIS16209_SCAN_ROT, IIO_ST('s', 14, 16, 0), 0), IIO_CHAN_SOFT_TIMESTAMP(8) }; static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16209_write_reset, 0); static struct attribute *adis16209_attributes[] = { &iio_dev_attr_reset.dev_attr.attr, NULL }; static const struct attribute_group adis16209_attribute_group = { .attrs = adis16209_attributes, }; static const struct 
iio_info adis16209_info = { .attrs = &adis16209_attribute_group, .read_raw = &adis16209_read_raw, .write_raw = &adis16209_write_raw, .driver_module = THIS_MODULE, }; static int __devinit adis16209_probe(struct spi_device *spi) { int ret, regdone = 0; struct adis16209_state *st; struct iio_dev *indio_dev; /* setup the industrialio driver allocated elements */ indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } st = iio_priv(indio_dev); /* this is only used for removal purposes */ spi_set_drvdata(spi, indio_dev); st->us = spi; mutex_init(&st->buf_lock); indio_dev->name = spi->dev.driver->name; indio_dev->dev.parent = &spi->dev; indio_dev->info = &adis16209_info; indio_dev->channels = adis16209_channels; indio_dev->num_channels = ARRAY_SIZE(adis16209_channels); indio_dev->modes = INDIO_DIRECT_MODE; ret = adis16209_configure_ring(indio_dev); if (ret) goto error_free_dev; ret = iio_device_register(indio_dev); if (ret) goto error_unreg_ring_funcs; regdone = 1; ret = iio_ring_buffer_register_ex(indio_dev->ring, 0, adis16209_channels, ARRAY_SIZE(adis16209_channels)); if (ret) { printk(KERN_ERR "failed to initialize the ring\n"); goto error_unreg_ring_funcs; } if (spi->irq) { ret = adis16209_probe_trigger(indio_dev); if (ret) goto error_uninitialize_ring; } /* Get the device into a sane initial state */ ret = adis16209_initial_setup(indio_dev); if (ret) goto error_remove_trigger; return 0; error_remove_trigger: adis16209_remove_trigger(indio_dev); error_uninitialize_ring: iio_ring_buffer_unregister(indio_dev->ring); error_unreg_ring_funcs: adis16209_unconfigure_ring(indio_dev); error_free_dev: if (regdone) iio_device_unregister(indio_dev); else iio_free_device(indio_dev); error_ret: return ret; } static int adis16209_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); flush_scheduled_work(); adis16209_remove_trigger(indio_dev); iio_ring_buffer_unregister(indio_dev->ring); 
iio_device_unregister(indio_dev); adis16209_unconfigure_ring(indio_dev); return 0; } static struct spi_driver adis16209_driver = { .driver = { .name = "adis16209", .owner = THIS_MODULE, }, .probe = adis16209_probe, .remove = __devexit_p(adis16209_remove), }; static __init int adis16209_init(void) { return spi_register_driver(&adis16209_driver); } module_init(adis16209_init); static __exit void adis16209_exit(void) { spi_unregister_driver(&adis16209_driver); } module_exit(adis16209_exit); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices ADIS16209 Digital Vibration Sensor driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
arpith20/ubuntu-vivid
drivers/net/wireless/libertas/main.c
642
29898
/* * This file contains the major functions in WLAN * driver. It includes init, exit, open, close and main * thread etc.. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/hardirq.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/kthread.h> #include <linux/kfifo.h> #include <linux/slab.h> #include <net/cfg80211.h> #include "host.h" #include "decl.h" #include "dev.h" #include "cfg.h" #include "debugfs.h" #include "cmd.h" #include "mesh.h" #define DRIVER_RELEASE_VERSION "323.p0" const char lbs_driver_version[] = "COMM-USB8388-" DRIVER_RELEASE_VERSION #ifdef DEBUG "-dbg" #endif ""; /* Module parameters */ unsigned int lbs_debug; EXPORT_SYMBOL_GPL(lbs_debug); module_param_named(libertas_debug, lbs_debug, int, 0644); unsigned int lbs_disablemesh; EXPORT_SYMBOL_GPL(lbs_disablemesh); module_param_named(libertas_disablemesh, lbs_disablemesh, int, 0644); /* * This global structure is used to send the confirm_sleep command as * fast as possible down to the firmware. */ struct cmd_confirm_sleep confirm_sleep; /* * the table to keep region code */ u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE] = { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; /* * FW rate table. FW refers to rates by their index in this table, not by the * rate value itself. Values of 0x00 are * reserved positions. 
*/ static u8 fw_data_rates[MAX_RATES] = { 0x02, 0x04, 0x0B, 0x16, 0x00, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x00 }; /** * lbs_fw_index_to_data_rate - use index to get the data rate * * @idx: The index of data rate * returns: data rate or 0 */ u32 lbs_fw_index_to_data_rate(u8 idx) { if (idx >= sizeof(fw_data_rates)) idx = 0; return fw_data_rates[idx]; } /** * lbs_data_rate_to_fw_index - use rate to get the index * * @rate: data rate * returns: index or 0 */ u8 lbs_data_rate_to_fw_index(u32 rate) { u8 i; if (!rate) return 0; for (i = 0; i < sizeof(fw_data_rates); i++) { if (rate == fw_data_rates[i]) return i; } return 0; } int lbs_set_iface_type(struct lbs_private *priv, enum nl80211_iftype type) { int ret = 0; switch (type) { case NL80211_IFTYPE_MONITOR: ret = lbs_set_monitor_mode(priv, 1); break; case NL80211_IFTYPE_STATION: if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) ret = lbs_set_monitor_mode(priv, 0); if (!ret) ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 1); break; case NL80211_IFTYPE_ADHOC: if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) ret = lbs_set_monitor_mode(priv, 0); if (!ret) ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, 2); break; default: ret = -ENOTSUPP; } return ret; } int lbs_start_iface(struct lbs_private *priv) { struct cmd_ds_802_11_mac_address cmd; int ret; if (priv->power_restore) { ret = priv->power_restore(priv); if (ret) return ret; } cmd.hdr.size = cpu_to_le16(sizeof(cmd)); cmd.action = cpu_to_le16(CMD_ACT_SET); memcpy(cmd.macadd, priv->current_addr, ETH_ALEN); ret = lbs_cmd_with_response(priv, CMD_802_11_MAC_ADDRESS, &cmd); if (ret) { lbs_deb_net("set MAC address failed\n"); goto err; } ret = lbs_set_iface_type(priv, priv->wdev->iftype); if (ret) { lbs_deb_net("set iface type failed\n"); goto err; } ret = lbs_set_11d_domain_info(priv); if (ret) { lbs_deb_net("set 11d domain info failed\n"); goto err; } lbs_update_channel(priv); priv->iface_running = true; return 0; err: if (priv->power_save) 
priv->power_save(priv); return ret; } /** * lbs_dev_open - open the ethX interface * * @dev: A pointer to &net_device structure * returns: 0 or -EBUSY if monitor mode active */ static int lbs_dev_open(struct net_device *dev) { struct lbs_private *priv = dev->ml_priv; int ret = 0; lbs_deb_enter(LBS_DEB_NET); if (!priv->iface_running) { ret = lbs_start_iface(priv); if (ret) goto out; } spin_lock_irq(&priv->driver_lock); netif_carrier_off(dev); if (!priv->tx_pending_len) netif_wake_queue(dev); spin_unlock_irq(&priv->driver_lock); out: lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); return ret; } static bool lbs_command_queue_empty(struct lbs_private *priv) { unsigned long flags; bool ret; spin_lock_irqsave(&priv->driver_lock, flags); ret = priv->cur_cmd == NULL && list_empty(&priv->cmdpendingq); spin_unlock_irqrestore(&priv->driver_lock, flags); return ret; } int lbs_stop_iface(struct lbs_private *priv) { unsigned long flags; int ret = 0; lbs_deb_enter(LBS_DEB_MAIN); spin_lock_irqsave(&priv->driver_lock, flags); priv->iface_running = false; kfree_skb(priv->currenttxskb); priv->currenttxskb = NULL; priv->tx_pending_len = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); cancel_work_sync(&priv->mcast_work); del_timer_sync(&priv->tx_lockup_timer); /* Disable command processing, and wait for all commands to complete */ lbs_deb_main("waiting for commands to complete\n"); wait_event(priv->waitq, lbs_command_queue_empty(priv)); lbs_deb_main("all commands completed\n"); if (priv->power_save) ret = priv->power_save(priv); lbs_deb_leave(LBS_DEB_MAIN); return ret; } /** * lbs_eth_stop - close the ethX interface * * @dev: A pointer to &net_device structure * returns: 0 */ static int lbs_eth_stop(struct net_device *dev) { struct lbs_private *priv = dev->ml_priv; lbs_deb_enter(LBS_DEB_NET); if (priv->connect_status == LBS_CONNECTED) lbs_disconnect(priv, WLAN_REASON_DEAUTH_LEAVING); spin_lock_irq(&priv->driver_lock); netif_stop_queue(dev); spin_unlock_irq(&priv->driver_lock); 
lbs_update_mcast(priv); cancel_delayed_work_sync(&priv->scan_work); if (priv->scan_req) lbs_scan_done(priv); netif_carrier_off(priv->dev); if (!lbs_iface_active(priv)) lbs_stop_iface(priv); lbs_deb_leave(LBS_DEB_NET); return 0; } void lbs_host_to_card_done(struct lbs_private *priv) { unsigned long flags; lbs_deb_enter(LBS_DEB_THREAD); spin_lock_irqsave(&priv->driver_lock, flags); del_timer(&priv->tx_lockup_timer); priv->dnld_sent = DNLD_RES_RECEIVED; /* Wake main thread if commands are pending */ if (!priv->cur_cmd || priv->tx_pending_len > 0) { if (!priv->wakeup_dev_required) wake_up(&priv->waitq); } spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_leave(LBS_DEB_THREAD); } EXPORT_SYMBOL_GPL(lbs_host_to_card_done); int lbs_set_mac_address(struct net_device *dev, void *addr) { int ret = 0; struct lbs_private *priv = dev->ml_priv; struct sockaddr *phwaddr = addr; lbs_deb_enter(LBS_DEB_NET); /* * Can only set MAC address when all interfaces are down, to be written * to the hardware when one of them is brought up. 
*/ if (lbs_iface_active(priv)) return -EBUSY; /* In case it was called from the mesh device */ dev = priv->dev; memcpy(priv->current_addr, phwaddr->sa_data, ETH_ALEN); memcpy(dev->dev_addr, phwaddr->sa_data, ETH_ALEN); if (priv->mesh_dev) memcpy(priv->mesh_dev->dev_addr, phwaddr->sa_data, ETH_ALEN); lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); return ret; } static inline int mac_in_list(unsigned char *list, int list_len, unsigned char *mac) { while (list_len) { if (!memcmp(list, mac, ETH_ALEN)) return 1; list += ETH_ALEN; list_len--; } return 0; } static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd, struct net_device *dev, int nr_addrs) { int i = nr_addrs; struct netdev_hw_addr *ha; int cnt; if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST)) return nr_addrs; netif_addr_lock_bh(dev); cnt = netdev_mc_count(dev); netdev_for_each_mc_addr(ha, dev) { if (mac_in_list(cmd->maclist, nr_addrs, ha->addr)) { lbs_deb_net("mcast address %s:%pM skipped\n", dev->name, ha->addr); cnt--; continue; } if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE) break; memcpy(&cmd->maclist[6*i], ha->addr, ETH_ALEN); lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name, ha->addr); i++; cnt--; } netif_addr_unlock_bh(dev); if (cnt) return -EOVERFLOW; return i; } void lbs_update_mcast(struct lbs_private *priv) { struct cmd_ds_mac_multicast_adr mcast_cmd; int dev_flags = 0; int nr_addrs; int old_mac_control = priv->mac_control; lbs_deb_enter(LBS_DEB_NET); if (netif_running(priv->dev)) dev_flags |= priv->dev->flags; if (priv->mesh_dev && netif_running(priv->mesh_dev)) dev_flags |= priv->mesh_dev->flags; if (dev_flags & IFF_PROMISC) { priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE; priv->mac_control &= ~(CMD_ACT_MAC_ALL_MULTICAST_ENABLE | CMD_ACT_MAC_MULTICAST_ENABLE); goto out_set_mac_control; } else if (dev_flags & IFF_ALLMULTI) { do_allmulti: priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE; priv->mac_control &= ~(CMD_ACT_MAC_PROMISCUOUS_ENABLE | 
CMD_ACT_MAC_MULTICAST_ENABLE); goto out_set_mac_control; } /* Once for priv->dev, again for priv->mesh_dev if it exists */ nr_addrs = lbs_add_mcast_addrs(&mcast_cmd, priv->dev, 0); if (nr_addrs >= 0 && priv->mesh_dev) nr_addrs = lbs_add_mcast_addrs(&mcast_cmd, priv->mesh_dev, nr_addrs); if (nr_addrs < 0) goto do_allmulti; if (nr_addrs) { int size = offsetof(struct cmd_ds_mac_multicast_adr, maclist[6*nr_addrs]); mcast_cmd.action = cpu_to_le16(CMD_ACT_SET); mcast_cmd.hdr.size = cpu_to_le16(size); mcast_cmd.nr_of_adrs = cpu_to_le16(nr_addrs); lbs_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &mcast_cmd.hdr, size); priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE; } else priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE; priv->mac_control &= ~(CMD_ACT_MAC_PROMISCUOUS_ENABLE | CMD_ACT_MAC_ALL_MULTICAST_ENABLE); out_set_mac_control: if (priv->mac_control != old_mac_control) lbs_set_mac_control(priv); lbs_deb_leave(LBS_DEB_NET); } static void lbs_set_mcast_worker(struct work_struct *work) { struct lbs_private *priv = container_of(work, struct lbs_private, mcast_work); lbs_update_mcast(priv); } void lbs_set_multicast_list(struct net_device *dev) { struct lbs_private *priv = dev->ml_priv; schedule_work(&priv->mcast_work); } /** * lbs_thread - handles the major jobs in the LBS driver. * It handles all events generated by firmware, RX data received * from firmware and TX data sent from kernel. 
* * @data: A pointer to &lbs_thread structure * returns: 0 */ static int lbs_thread(void *data) { struct net_device *dev = data; struct lbs_private *priv = dev->ml_priv; wait_queue_t wait; lbs_deb_enter(LBS_DEB_THREAD); init_waitqueue_entry(&wait, current); for (;;) { int shouldsleep; u8 resp_idx; lbs_deb_thread("1: currenttxskb %p, dnld_sent %d\n", priv->currenttxskb, priv->dnld_sent); add_wait_queue(&priv->waitq, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&priv->driver_lock); if (kthread_should_stop()) shouldsleep = 0; /* Bye */ else if (priv->surpriseremoved) shouldsleep = 1; /* We need to wait until we're _told_ to die */ else if (priv->psstate == PS_STATE_SLEEP) shouldsleep = 1; /* Sleep mode. Nothing we can do till it wakes */ else if (priv->cmd_timed_out) shouldsleep = 0; /* Command timed out. Recover */ else if (!priv->fw_ready) shouldsleep = 1; /* Firmware not ready. We're waiting for it */ else if (priv->dnld_sent) shouldsleep = 1; /* Something is en route to the device already */ else if (priv->tx_pending_len > 0) shouldsleep = 0; /* We've a packet to send */ else if (priv->resp_len[priv->resp_idx]) shouldsleep = 0; /* We have a command response */ else if (priv->cur_cmd) shouldsleep = 1; /* Can't send a command; one already running */ else if (!list_empty(&priv->cmdpendingq) && !(priv->wakeup_dev_required)) shouldsleep = 0; /* We have a command to send */ else if (kfifo_len(&priv->event_fifo)) shouldsleep = 0; /* We have an event to process */ else shouldsleep = 1; /* No command */ if (shouldsleep) { lbs_deb_thread("sleeping, connect_status %d, " "psmode %d, psstate %d\n", priv->connect_status, priv->psmode, priv->psstate); spin_unlock_irq(&priv->driver_lock); schedule(); } else spin_unlock_irq(&priv->driver_lock); lbs_deb_thread("2: currenttxskb %p, dnld_send %d\n", priv->currenttxskb, priv->dnld_sent); set_current_state(TASK_RUNNING); remove_wait_queue(&priv->waitq, &wait); lbs_deb_thread("3: currenttxskb %p, dnld_sent %d\n", 
priv->currenttxskb, priv->dnld_sent); if (kthread_should_stop()) { lbs_deb_thread("break from main thread\n"); break; } if (priv->surpriseremoved) { lbs_deb_thread("adapter removed; waiting to die...\n"); continue; } lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n", priv->currenttxskb, priv->dnld_sent); /* Process any pending command response */ spin_lock_irq(&priv->driver_lock); resp_idx = priv->resp_idx; if (priv->resp_len[resp_idx]) { spin_unlock_irq(&priv->driver_lock); lbs_process_command_response(priv, priv->resp_buf[resp_idx], priv->resp_len[resp_idx]); spin_lock_irq(&priv->driver_lock); priv->resp_len[resp_idx] = 0; } spin_unlock_irq(&priv->driver_lock); /* Process hardware events, e.g. card removed, link lost */ spin_lock_irq(&priv->driver_lock); while (kfifo_len(&priv->event_fifo)) { u32 event; if (kfifo_out(&priv->event_fifo, (unsigned char *) &event, sizeof(event)) != sizeof(event)) break; spin_unlock_irq(&priv->driver_lock); lbs_process_event(priv, event); spin_lock_irq(&priv->driver_lock); } spin_unlock_irq(&priv->driver_lock); if (priv->wakeup_dev_required) { lbs_deb_thread("Waking up device...\n"); /* Wake up device */ if (priv->exit_deep_sleep(priv)) lbs_deb_thread("Wakeup device failed\n"); continue; } /* command timeout stuff */ if (priv->cmd_timed_out && priv->cur_cmd) { struct cmd_ctrl_node *cmdnode = priv->cur_cmd; netdev_info(dev, "Timeout submitting command 0x%04x\n", le16_to_cpu(cmdnode->cmdbuf->command)); lbs_complete_command(priv, cmdnode, -ETIMEDOUT); /* Reset card, but only when it isn't in the process * of being shutdown anyway. 
*/ if (!dev->dismantle && priv->reset_card) priv->reset_card(priv); } priv->cmd_timed_out = 0; if (!priv->fw_ready) continue; /* Check if we need to confirm Sleep Request received previously */ if (priv->psstate == PS_STATE_PRE_SLEEP && !priv->dnld_sent && !priv->cur_cmd) { if (priv->connect_status == LBS_CONNECTED) { lbs_deb_thread("pre-sleep, currenttxskb %p, " "dnld_sent %d, cur_cmd %p\n", priv->currenttxskb, priv->dnld_sent, priv->cur_cmd); lbs_ps_confirm_sleep(priv); } else { /* workaround for firmware sending * deauth/linkloss event immediately * after sleep request; remove this * after firmware fixes it */ priv->psstate = PS_STATE_AWAKE; netdev_alert(dev, "ignore PS_SleepConfirm in non-connected state\n"); } } /* The PS state is changed during processing of Sleep Request * event above */ if ((priv->psstate == PS_STATE_SLEEP) || (priv->psstate == PS_STATE_PRE_SLEEP)) continue; if (priv->is_deep_sleep) continue; /* Execute the next command */ if (!priv->dnld_sent && !priv->cur_cmd) lbs_execute_next_command(priv); spin_lock_irq(&priv->driver_lock); if (!priv->dnld_sent && priv->tx_pending_len > 0) { int ret = priv->hw_host_to_card(priv, MVMS_DAT, priv->tx_pending_buf, priv->tx_pending_len); if (ret) { lbs_deb_tx("host_to_card failed %d\n", ret); priv->dnld_sent = DNLD_RES_RECEIVED; } else { mod_timer(&priv->tx_lockup_timer, jiffies + (HZ * 5)); } priv->tx_pending_len = 0; if (!priv->currenttxskb) { /* We can wake the queues immediately if we aren't waiting for TX feedback */ if (priv->connect_status == LBS_CONNECTED) netif_wake_queue(priv->dev); if (priv->mesh_dev && netif_running(priv->mesh_dev)) netif_wake_queue(priv->mesh_dev); } } spin_unlock_irq(&priv->driver_lock); } del_timer(&priv->command_timer); del_timer(&priv->tx_lockup_timer); del_timer(&priv->auto_deepsleep_timer); lbs_deb_leave(LBS_DEB_THREAD); return 0; } /** * lbs_setup_firmware - gets the HW spec from the firmware and sets * some basic parameters * * @priv: A pointer to &struct lbs_private 
structure * returns: 0 or -1 */ static int lbs_setup_firmware(struct lbs_private *priv) { int ret = -1; s16 curlevel = 0, minlevel = 0, maxlevel = 0; lbs_deb_enter(LBS_DEB_FW); /* Read MAC address from firmware */ memset(priv->current_addr, 0xff, ETH_ALEN); ret = lbs_update_hw_spec(priv); if (ret) goto done; /* Read power levels if available */ ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel); if (ret == 0) { priv->txpower_cur = curlevel; priv->txpower_min = minlevel; priv->txpower_max = maxlevel; } /* Send cmd to FW to enable 11D function */ ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1); if (ret) goto done; ret = lbs_set_mac_control_sync(priv); done: lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); return ret; } int lbs_suspend(struct lbs_private *priv) { int ret; lbs_deb_enter(LBS_DEB_FW); if (priv->is_deep_sleep) { ret = lbs_set_deep_sleep(priv, 0); if (ret) { netdev_err(priv->dev, "deep sleep cancellation failed: %d\n", ret); return ret; } priv->deep_sleep_required = 1; } ret = lbs_set_host_sleep(priv, 1); netif_device_detach(priv->dev); if (priv->mesh_dev) netif_device_detach(priv->mesh_dev); lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); return ret; } EXPORT_SYMBOL_GPL(lbs_suspend); int lbs_resume(struct lbs_private *priv) { int ret; lbs_deb_enter(LBS_DEB_FW); ret = lbs_set_host_sleep(priv, 0); netif_device_attach(priv->dev); if (priv->mesh_dev) netif_device_attach(priv->mesh_dev); if (priv->deep_sleep_required) { priv->deep_sleep_required = 0; ret = lbs_set_deep_sleep(priv, 1); if (ret) netdev_err(priv->dev, "deep sleep activation failed: %d\n", ret); } if (priv->setup_fw_on_resume) ret = lbs_setup_firmware(priv); lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); return ret; } EXPORT_SYMBOL_GPL(lbs_resume); /** * lbs_cmd_timeout_handler - handles the timeout of command sending. * It will re-send the same command again. 
* * @data: &struct lbs_private pointer */ static void lbs_cmd_timeout_handler(unsigned long data) { struct lbs_private *priv = (struct lbs_private *)data; unsigned long flags; lbs_deb_enter(LBS_DEB_CMD); spin_lock_irqsave(&priv->driver_lock, flags); if (!priv->cur_cmd) goto out; netdev_info(priv->dev, "command 0x%04x timed out\n", le16_to_cpu(priv->cur_cmd->cmdbuf->command)); priv->cmd_timed_out = 1; /* * If the device didn't even acknowledge the command, reset the state * so that we don't block all future commands due to this one timeout. */ if (priv->dnld_sent == DNLD_CMD_SENT) priv->dnld_sent = DNLD_RES_RECEIVED; wake_up(&priv->waitq); out: spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_leave(LBS_DEB_CMD); } /** * lbs_tx_lockup_handler - handles the timeout of the passing of TX frames * to the hardware. This is known to frequently happen with SD8686 when * waking up after a Wake-on-WLAN-triggered resume. * * @data: &struct lbs_private pointer */ static void lbs_tx_lockup_handler(unsigned long data) { struct lbs_private *priv = (struct lbs_private *)data; unsigned long flags; lbs_deb_enter(LBS_DEB_TX); spin_lock_irqsave(&priv->driver_lock, flags); netdev_info(priv->dev, "TX lockup detected\n"); if (priv->reset_card) priv->reset_card(priv); priv->dnld_sent = DNLD_RES_RECEIVED; wake_up_interruptible(&priv->waitq); spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_leave(LBS_DEB_TX); } /** * auto_deepsleep_timer_fn - put the device back to deep sleep mode when * timer expires and no activity (command, event, data etc.) is detected. 
* @data: &struct lbs_private pointer * returns: N/A */ static void auto_deepsleep_timer_fn(unsigned long data) { struct lbs_private *priv = (struct lbs_private *)data; lbs_deb_enter(LBS_DEB_CMD); if (priv->is_activity_detected) { priv->is_activity_detected = 0; } else { if (priv->is_auto_deep_sleep_enabled && (!priv->wakeup_dev_required) && (priv->connect_status != LBS_CONNECTED)) { struct cmd_header cmd; lbs_deb_main("Entering auto deep sleep mode...\n"); memset(&cmd, 0, sizeof(cmd)); cmd.size = cpu_to_le16(sizeof(cmd)); lbs_cmd_async(priv, CMD_802_11_DEEP_SLEEP, &cmd, sizeof(cmd)); } } mod_timer(&priv->auto_deepsleep_timer , jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000); lbs_deb_leave(LBS_DEB_CMD); } int lbs_enter_auto_deep_sleep(struct lbs_private *priv) { lbs_deb_enter(LBS_DEB_SDIO); priv->is_auto_deep_sleep_enabled = 1; if (priv->is_deep_sleep) priv->wakeup_dev_required = 1; mod_timer(&priv->auto_deepsleep_timer , jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000); lbs_deb_leave(LBS_DEB_SDIO); return 0; } int lbs_exit_auto_deep_sleep(struct lbs_private *priv) { lbs_deb_enter(LBS_DEB_SDIO); priv->is_auto_deep_sleep_enabled = 0; priv->auto_deep_sleep_timeout = 0; del_timer(&priv->auto_deepsleep_timer); lbs_deb_leave(LBS_DEB_SDIO); return 0; } static int lbs_init_adapter(struct lbs_private *priv) { int ret; lbs_deb_enter(LBS_DEB_MAIN); memset(priv->current_addr, 0xff, ETH_ALEN); priv->connect_status = LBS_DISCONNECTED; priv->channel = DEFAULT_AD_HOC_CHANNEL; priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; priv->radio_on = 1; priv->psmode = LBS802_11POWERMODECAM; priv->psstate = PS_STATE_FULL_POWER; priv->is_deep_sleep = 0; priv->is_auto_deep_sleep_enabled = 0; priv->deep_sleep_required = 0; priv->wakeup_dev_required = 0; init_waitqueue_head(&priv->ds_awake_q); init_waitqueue_head(&priv->scan_q); priv->authtype_auto = 1; priv->is_host_sleep_configured = 0; priv->is_host_sleep_activated = 0; init_waitqueue_head(&priv->host_sleep_q); 
init_waitqueue_head(&priv->fw_waitq); mutex_init(&priv->lock); setup_timer(&priv->command_timer, lbs_cmd_timeout_handler, (unsigned long)priv); setup_timer(&priv->tx_lockup_timer, lbs_tx_lockup_handler, (unsigned long)priv); setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn, (unsigned long)priv); INIT_LIST_HEAD(&priv->cmdfreeq); INIT_LIST_HEAD(&priv->cmdpendingq); spin_lock_init(&priv->driver_lock); /* Allocate the command buffers */ if (lbs_allocate_cmd_buffer(priv)) { pr_err("Out of memory allocating command buffers\n"); ret = -ENOMEM; goto out; } priv->resp_idx = 0; priv->resp_len[0] = priv->resp_len[1] = 0; /* Create the event FIFO */ ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL); if (ret) { pr_err("Out of memory allocating event FIFO buffer\n"); goto out; } out: lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); return ret; } static void lbs_free_adapter(struct lbs_private *priv) { lbs_deb_enter(LBS_DEB_MAIN); lbs_free_cmd_buffer(priv); kfifo_free(&priv->event_fifo); del_timer(&priv->command_timer); del_timer(&priv->tx_lockup_timer); del_timer(&priv->auto_deepsleep_timer); lbs_deb_leave(LBS_DEB_MAIN); } static const struct net_device_ops lbs_netdev_ops = { .ndo_open = lbs_dev_open, .ndo_stop = lbs_eth_stop, .ndo_start_xmit = lbs_hard_start_xmit, .ndo_set_mac_address = lbs_set_mac_address, .ndo_set_rx_mode = lbs_set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; /** * lbs_add_card - adds the card. It will probe the * card, allocate the lbs_priv and initialize the device. 
* * @card: A pointer to card * @dmdev: A pointer to &struct device * returns: A pointer to &struct lbs_private structure */ struct lbs_private *lbs_add_card(void *card, struct device *dmdev) { struct net_device *dev; struct wireless_dev *wdev; struct lbs_private *priv = NULL; lbs_deb_enter(LBS_DEB_MAIN); /* Allocate an Ethernet device and register it */ wdev = lbs_cfg_alloc(dmdev); if (IS_ERR(wdev)) { pr_err("cfg80211 init failed\n"); goto done; } wdev->iftype = NL80211_IFTYPE_STATION; priv = wdev_priv(wdev); priv->wdev = wdev; if (lbs_init_adapter(priv)) { pr_err("failed to initialize adapter structure\n"); goto err_wdev; } dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup); if (!dev) { dev_err(dmdev, "no memory for network device instance\n"); goto err_adapter; } dev->ieee80211_ptr = wdev; dev->ml_priv = priv; SET_NETDEV_DEV(dev, dmdev); wdev->netdev = dev; priv->dev = dev; dev->netdev_ops = &lbs_netdev_ops; dev->watchdog_timeo = 5 * HZ; dev->ethtool_ops = &lbs_ethtool_ops; dev->flags |= IFF_BROADCAST | IFF_MULTICAST; priv->card = card; strcpy(dev->name, "wlan%d"); lbs_deb_thread("Starting main thread...\n"); init_waitqueue_head(&priv->waitq); priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main"); if (IS_ERR(priv->main_thread)) { lbs_deb_thread("Error creating main thread.\n"); goto err_ndev; } priv->work_thread = create_singlethread_workqueue("lbs_worker"); INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker); priv->wol_criteria = EHS_REMOVE_WAKEUP; priv->wol_gpio = 0xff; priv->wol_gap = 20; priv->ehs_remove_supported = true; goto done; err_ndev: free_netdev(dev); err_adapter: lbs_free_adapter(priv); err_wdev: lbs_cfg_free(priv); priv = NULL; done: lbs_deb_leave_args(LBS_DEB_MAIN, "priv %p", priv); return priv; } EXPORT_SYMBOL_GPL(lbs_add_card); void lbs_remove_card(struct lbs_private *priv) { struct net_device *dev = priv->dev; lbs_deb_enter(LBS_DEB_MAIN); lbs_remove_mesh(priv); if (priv->wiphy_registered) lbs_scan_deinit(priv); 
lbs_wait_for_firmware_load(priv); /* worker thread destruction blocks on the in-flight command which * should have been cleared already in lbs_stop_card(). */ lbs_deb_main("destroying worker thread\n"); destroy_workqueue(priv->work_thread); lbs_deb_main("done destroying worker thread\n"); if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { priv->psmode = LBS802_11POWERMODECAM; lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true); } if (priv->is_deep_sleep) { priv->is_deep_sleep = 0; wake_up_interruptible(&priv->ds_awake_q); } priv->is_host_sleep_configured = 0; priv->is_host_sleep_activated = 0; wake_up_interruptible(&priv->host_sleep_q); /* Stop the thread servicing the interrupts */ priv->surpriseremoved = 1; kthread_stop(priv->main_thread); lbs_free_adapter(priv); lbs_cfg_free(priv); free_netdev(dev); lbs_deb_leave(LBS_DEB_MAIN); } EXPORT_SYMBOL_GPL(lbs_remove_card); int lbs_rtap_supported(struct lbs_private *priv) { if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) return 1; /* newer firmware use a capability mask */ return ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) && (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)); } int lbs_start_card(struct lbs_private *priv) { struct net_device *dev = priv->dev; int ret = -1; lbs_deb_enter(LBS_DEB_MAIN); /* poke the firmware */ ret = lbs_setup_firmware(priv); if (ret) goto done; if (!lbs_disablemesh) lbs_init_mesh(priv); else pr_info("%s: mesh disabled\n", dev->name); if (lbs_cfg_register(priv)) { pr_err("cannot register device\n"); goto done; } if (lbs_mesh_activated(priv)) lbs_start_mesh(priv); lbs_debugfs_init_one(priv, dev); netdev_info(dev, "Marvell WLAN 802.11 adapter\n"); ret = 0; done: lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); return ret; } EXPORT_SYMBOL_GPL(lbs_start_card); void lbs_stop_card(struct lbs_private *priv) { struct net_device *dev; lbs_deb_enter(LBS_DEB_MAIN); if (!priv) goto out; dev = priv->dev; /* If the netdev isn't registered, it means that lbs_start_card() was * never called so we 
have nothing to do here. */ if (dev->reg_state != NETREG_REGISTERED) goto out; netif_stop_queue(dev); netif_carrier_off(dev); lbs_debugfs_remove_one(priv); lbs_deinit_mesh(priv); unregister_netdev(dev); out: lbs_deb_leave(LBS_DEB_MAIN); } EXPORT_SYMBOL_GPL(lbs_stop_card); void lbs_queue_event(struct lbs_private *priv, u32 event) { unsigned long flags; lbs_deb_enter(LBS_DEB_THREAD); spin_lock_irqsave(&priv->driver_lock, flags); if (priv->psstate == PS_STATE_SLEEP) priv->psstate = PS_STATE_AWAKE; kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); wake_up(&priv->waitq); spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_leave(LBS_DEB_THREAD); } EXPORT_SYMBOL_GPL(lbs_queue_event); void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx) { lbs_deb_enter(LBS_DEB_THREAD); if (priv->psstate == PS_STATE_SLEEP) priv->psstate = PS_STATE_AWAKE; /* Swap buffers by flipping the response index */ BUG_ON(resp_idx > 1); priv->resp_idx = resp_idx; wake_up(&priv->waitq); lbs_deb_leave(LBS_DEB_THREAD); } EXPORT_SYMBOL_GPL(lbs_notify_command_response); static int __init lbs_init_module(void) { lbs_deb_enter(LBS_DEB_MAIN); memset(&confirm_sleep, 0, sizeof(confirm_sleep)); confirm_sleep.hdr.command = cpu_to_le16(CMD_802_11_PS_MODE); confirm_sleep.hdr.size = cpu_to_le16(sizeof(confirm_sleep)); confirm_sleep.action = cpu_to_le16(PS_MODE_ACTION_SLEEP_CONFIRMED); lbs_debugfs_init(); lbs_deb_leave(LBS_DEB_MAIN); return 0; } static void __exit lbs_exit_module(void) { lbs_deb_enter(LBS_DEB_MAIN); lbs_debugfs_remove(); lbs_deb_leave(LBS_DEB_MAIN); } module_init(lbs_init_module); module_exit(lbs_exit_module); MODULE_DESCRIPTION("Libertas WLAN Driver Library"); MODULE_AUTHOR("Marvell International Ltd."); MODULE_LICENSE("GPL");
gpl-2.0
poondog/kangaroo-m7-mkII
drivers/staging/comedi/drivers/dt282x.c
1154
36150
/* comedi/drivers/dt282x.c Hardware driver for Data Translation DT2821 series COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1997-8 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: dt282x Description: Data Translation DT2821 series (including DT-EZ) Author: ds Devices: [Data Translation] DT2821 (dt2821), DT2821-F-16SE (dt2821-f), DT2821-F-8DI (dt2821-f), DT2821-G-16SE (dt2821-f), DT2821-G-8DI (dt2821-g), DT2823 (dt2823), DT2824-PGH (dt2824-pgh), DT2824-PGL (dt2824-pgl), DT2825 (dt2825), DT2827 (dt2827), DT2828 (dt2828), DT21-EZ (dt21-ez), DT23-EZ (dt23-ez), DT24-EZ (dt24-ez), DT24-EZ-PGL (dt24-ez-pgl) Status: complete Updated: Wed, 22 Aug 2001 17:11:34 -0700 Configuration options: [0] - I/O port base address [1] - IRQ [2] - DMA 1 [3] - DMA 2 [4] - AI jumpered for 0=single ended, 1=differential [5] - AI jumpered for 0=straight binary, 1=2's complement [6] - AO 0 jumpered for 0=straight binary, 1=2's complement [7] - AO 1 jumpered for 0=straight binary, 1=2's complement [8] - AI jumpered for 0=[-10,10]V, 1=[0,10], 2=[-5,5], 3=[0,5] [9] - AO 0 jumpered for 0=[-10,10]V, 1=[0,10], 2=[-5,5], 3=[0,5], 4=[-2.5,2.5] [10]- A0 1 jumpered for 0=[-10,10]V, 1=[0,10], 2=[-5,5], 3=[0,5], 4=[-2.5,2.5] Notes: - AO commands might be broken. 
- If you try to run a command on both the AI and AO subdevices simultaneously, bad things will happen. The driver needs to be fixed to check for this situation and return an error. */ #include "../comedidev.h" #include <linux/gfp.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/io.h> #include <asm/dma.h> #include "comedi_fc.h" #define DEBUG #define DT2821_TIMEOUT 100 /* 500 us */ #define DT2821_SIZE 0x10 /* * Registers in the DT282x */ #define DT2821_ADCSR 0x00 /* A/D Control/Status */ #define DT2821_CHANCSR 0x02 /* Channel Control/Status */ #define DT2821_ADDAT 0x04 /* A/D data */ #define DT2821_DACSR 0x06 /* D/A Control/Status */ #define DT2821_DADAT 0x08 /* D/A data */ #define DT2821_DIODAT 0x0a /* digital data */ #define DT2821_SUPCSR 0x0c /* Supervisor Control/Status */ #define DT2821_TMRCTR 0x0e /* Timer/Counter */ /* * At power up, some registers are in a well-known state. The * masks and values are as follows: */ #define DT2821_ADCSR_MASK 0xfff0 #define DT2821_ADCSR_VAL 0x7c00 #define DT2821_CHANCSR_MASK 0xf0f0 #define DT2821_CHANCSR_VAL 0x70f0 #define DT2821_DACSR_MASK 0x7c93 #define DT2821_DACSR_VAL 0x7c90 #define DT2821_SUPCSR_MASK 0xf8ff #define DT2821_SUPCSR_VAL 0x0000 #define DT2821_TMRCTR_MASK 0xff00 #define DT2821_TMRCTR_VAL 0xf000 /* * Bit fields of each register */ /* ADCSR */ #define DT2821_ADERR 0x8000 /* (R) 1 for A/D error */ #define DT2821_ADCLK 0x0200 /* (R/W) A/D clock enable */ /* 0x7c00 read as 1's */ #define DT2821_MUXBUSY 0x0100 /* (R) multiplexer busy */ #define DT2821_ADDONE 0x0080 /* (R) A/D done */ #define DT2821_IADDONE 0x0040 /* (R/W) interrupt on A/D done */ /* 0x0030 gain select */ /* 0x000f channel select */ /* CHANCSR */ #define DT2821_LLE 0x8000 /* (R/W) Load List Enable */ /* 0x7000 read as 1's */ /* 0x0f00 (R) present address */ /* 0x00f0 read as 1's */ /* 0x000f (R) number of entries - 1 */ /* DACSR */ #define DT2821_DAERR 0x8000 /* (R) D/A error */ #define DT2821_YSEL 0x0200 /* (R/W) DAC 1 select 
*/ #define DT2821_SSEL 0x0100 /* (R/W) single channel select */ #define DT2821_DACRDY 0x0080 /* (R) DAC ready */ #define DT2821_IDARDY 0x0040 /* (R/W) interrupt on DAC ready */ #define DT2821_DACLK 0x0020 /* (R/W) D/A clock enable */ #define DT2821_HBOE 0x0002 /* (R/W) DIO high byte output enable */ #define DT2821_LBOE 0x0001 /* (R/W) DIO low byte output enable */ /* SUPCSR */ #define DT2821_DMAD 0x8000 /* (R) DMA done */ #define DT2821_ERRINTEN 0x4000 /* (R/W) interrupt on error */ #define DT2821_CLRDMADNE 0x2000 /* (W) clear DMA done */ #define DT2821_DDMA 0x1000 /* (R/W) dual DMA */ #define DT2821_DS1 0x0800 /* (R/W) DMA select 1 */ #define DT2821_DS0 0x0400 /* (R/W) DMA select 0 */ #define DT2821_BUFFB 0x0200 /* (R/W) buffer B selected */ #define DT2821_SCDN 0x0100 /* (R) scan done */ #define DT2821_DACON 0x0080 /* (W) DAC single conversion */ #define DT2821_ADCINIT 0x0040 /* (W) A/D initialize */ #define DT2821_DACINIT 0x0020 /* (W) D/A initialize */ #define DT2821_PRLD 0x0010 /* (W) preload multiplexer */ #define DT2821_STRIG 0x0008 /* (W) software trigger */ #define DT2821_XTRIG 0x0004 /* (R/W) external trigger enable */ #define DT2821_XCLK 0x0002 /* (R/W) external clock enable */ #define DT2821_BDINIT 0x0001 /* (W) initialize board */ static const struct comedi_lrange range_dt282x_ai_lo_bipolar = { 4, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-2.5, 2.5), RANGE(-1.25, 1.25) } }; static const struct comedi_lrange range_dt282x_ai_lo_unipolar = { 4, { RANGE(0, 10), RANGE(0, 5), RANGE(0, 2.5), RANGE(0, 1.25) } }; static const struct comedi_lrange range_dt282x_ai_5_bipolar = { 4, { RANGE(-5, 5), RANGE(-2.5, 2.5), RANGE(-1.25, 1.25), RANGE(-0.625, 0.625) } }; static const struct comedi_lrange range_dt282x_ai_5_unipolar = { 4, { RANGE(0, 5), RANGE(0, 2.5), RANGE(0, 1.25), RANGE(0, 0.625), } }; static const struct comedi_lrange range_dt282x_ai_hi_bipolar = { 4, { RANGE(-10, 10), RANGE(-1, 1), RANGE(-0.1, 0.1), RANGE(-0.02, 0.02) } }; static const struct comedi_lrange 
range_dt282x_ai_hi_unipolar = { 4, { RANGE(0, 10), RANGE(0, 1), RANGE(0, 0.1), RANGE(0, 0.02) } }; struct dt282x_board { const char *name; int adbits; int adchan_se; int adchan_di; int ai_speed; int ispgl; int dachan; int dabits; }; static const struct dt282x_board boardtypes[] = { {.name = "dt2821", .adbits = 12, .adchan_se = 16, .adchan_di = 8, .ai_speed = 20000, .ispgl = 0, .dachan = 2, .dabits = 12, }, {.name = "dt2821-f", .adbits = 12, .adchan_se = 16, .adchan_di = 8, .ai_speed = 6500, .ispgl = 0, .dachan = 2, .dabits = 12, }, {.name = "dt2821-g", .adbits = 12, .adchan_se = 16, .adchan_di = 8, .ai_speed = 4000, .ispgl = 0, .dachan = 2, .dabits = 12, }, {.name = "dt2823", .adbits = 16, .adchan_se = 0, .adchan_di = 4, .ai_speed = 10000, .ispgl = 0, .dachan = 2, .dabits = 16, }, {.name = "dt2824-pgh", .adbits = 12, .adchan_se = 16, .adchan_di = 8, .ai_speed = 20000, .ispgl = 0, .dachan = 0, .dabits = 0, }, {.name = "dt2824-pgl", .adbits = 12, .adchan_se = 16, .adchan_di = 8, .ai_speed = 20000, .ispgl = 1, .dachan = 0, .dabits = 0, }, {.name = "dt2825", .adbits = 12, .adchan_se = 16, .adchan_di = 8, .ai_speed = 20000, .ispgl = 1, .dachan = 2, .dabits = 12, }, {.name = "dt2827", .adbits = 16, .adchan_se = 0, .adchan_di = 4, .ai_speed = 10000, .ispgl = 0, .dachan = 2, .dabits = 12, }, {.name = "dt2828", .adbits = 12, .adchan_se = 4, .adchan_di = 0, .ai_speed = 10000, .ispgl = 0, .dachan = 2, .dabits = 12, }, {.name = "dt2829", .adbits = 16, .adchan_se = 8, .adchan_di = 0, .ai_speed = 33250, .ispgl = 0, .dachan = 2, .dabits = 16, }, {.name = "dt21-ez", .adbits = 12, .adchan_se = 16, .adchan_di = 8, .ai_speed = 10000, .ispgl = 0, .dachan = 2, .dabits = 12, }, {.name = "dt23-ez", .adbits = 16, .adchan_se = 16, .adchan_di = 8, .ai_speed = 10000, .ispgl = 0, .dachan = 0, .dabits = 0, }, {.name = "dt24-ez", .adbits = 12, .adchan_se = 16, .adchan_di = 8, .ai_speed = 10000, .ispgl = 0, .dachan = 0, .dabits = 0, }, {.name = "dt24-ez-pgl", .adbits = 12, .adchan_se = 16, 
.adchan_di = 8, .ai_speed = 10000, .ispgl = 1, .dachan = 0, .dabits = 0, }, }; #define n_boardtypes (sizeof(boardtypes)/sizeof(struct dt282x_board)) #define this_board ((const struct dt282x_board *)dev->board_ptr) struct dt282x_private { int ad_2scomp; /* we have 2's comp jumper set */ int da0_2scomp; /* same, for DAC0 */ int da1_2scomp; /* same, for DAC1 */ const struct comedi_lrange *darangelist[2]; short ao[2]; volatile int dacsr; /* software copies of registers */ volatile int adcsr; volatile int supcsr; volatile int ntrig; volatile int nread; struct { int chan; short *buf; /* DMA buffer */ volatile int size; /* size of current transfer */ } dma[2]; int dma_maxsize; /* max size of DMA transfer (in bytes) */ int usedma; /* driver uses DMA */ volatile int current_dma_index; int dma_dir; }; #define devpriv ((struct dt282x_private *)dev->private) #define boardtype (*(const struct dt282x_board *)dev->board_ptr) /* * Some useless abstractions */ #define chan_to_DAC(a) ((a)&1) #define update_dacsr(a) outw(devpriv->dacsr|(a), dev->iobase+DT2821_DACSR) #define update_adcsr(a) outw(devpriv->adcsr|(a), dev->iobase+DT2821_ADCSR) #define mux_busy() (inw(dev->iobase+DT2821_ADCSR)&DT2821_MUXBUSY) #define ad_done() (inw(dev->iobase+DT2821_ADCSR)&DT2821_ADDONE) #define update_supcsr(a) outw(devpriv->supcsr|(a), dev->iobase+DT2821_SUPCSR) /* * danger! macro abuse... a is the expression to wait on, and b is * the statement(s) to execute if it doesn't happen. 
*/ #define wait_for(a, b) \ do { \ int _i; \ for (_i = 0; _i < DT2821_TIMEOUT; _i++) { \ if (a) { \ _i = 0; \ break; \ } \ udelay(5); \ } \ if (_i) { \ b \ } \ } while (0) static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int dt282x_detach(struct comedi_device *dev); static struct comedi_driver driver_dt282x = { .driver_name = "dt282x", .module = THIS_MODULE, .attach = dt282x_attach, .detach = dt282x_detach, .board_name = &boardtypes[0].name, .num_names = n_boardtypes, .offset = sizeof(struct dt282x_board), }; static int __init driver_dt282x_init_module(void) { return comedi_driver_register(&driver_dt282x); } static void __exit driver_dt282x_cleanup_module(void) { comedi_driver_unregister(&driver_dt282x); } module_init(driver_dt282x_init_module); module_exit(driver_dt282x_cleanup_module); static void free_resources(struct comedi_device *dev); static int prep_ai_dma(struct comedi_device *dev, int chan, int size); static int prep_ao_dma(struct comedi_device *dev, int chan, int size); static int dt282x_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int dt282x_ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int dt282x_ns_to_timer(int *nanosec, int round_mode); static void dt282x_disable_dma(struct comedi_device *dev); static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2); static void dt282x_munge(struct comedi_device *dev, short *buf, unsigned int nbytes) { unsigned int i; unsigned short mask = (1 << boardtype.adbits) - 1; unsigned short sign = 1 << (boardtype.adbits - 1); int n; if (devpriv->ad_2scomp) sign = 1 << (boardtype.adbits - 1); else sign = 0; if (nbytes % 2) comedi_error(dev, "bug! 
odd number of bytes from dma xfer"); n = nbytes / 2; for (i = 0; i < n; i++) buf[i] = (buf[i] & mask) ^ sign; } static void dt282x_ao_dma_interrupt(struct comedi_device *dev) { void *ptr; int size; int i; struct comedi_subdevice *s = dev->subdevices + 1; update_supcsr(DT2821_CLRDMADNE); if (!s->async->prealloc_buf) { printk(KERN_ERR "async->data disappeared. dang!\n"); return; } i = devpriv->current_dma_index; ptr = devpriv->dma[i].buf; disable_dma(devpriv->dma[i].chan); devpriv->current_dma_index = 1 - i; size = cfc_read_array_from_buffer(s, ptr, devpriv->dma_maxsize); if (size == 0) { printk(KERN_ERR "dt282x: AO underrun\n"); dt282x_ao_cancel(dev, s); s->async->events |= COMEDI_CB_OVERFLOW; return; } prep_ao_dma(dev, i, size); return; } static void dt282x_ai_dma_interrupt(struct comedi_device *dev) { void *ptr; int size; int i; int ret; struct comedi_subdevice *s = dev->subdevices; update_supcsr(DT2821_CLRDMADNE); if (!s->async->prealloc_buf) { printk(KERN_ERR "async->data disappeared. 
dang!\n"); return; } i = devpriv->current_dma_index; ptr = devpriv->dma[i].buf; size = devpriv->dma[i].size; disable_dma(devpriv->dma[i].chan); devpriv->current_dma_index = 1 - i; dt282x_munge(dev, ptr, size); ret = cfc_write_array_to_buffer(s, ptr, size); if (ret != size) { dt282x_ai_cancel(dev, s); return; } devpriv->nread -= size / 2; if (devpriv->nread < 0) { printk(KERN_INFO "dt282x: off by one\n"); devpriv->nread = 0; } if (!devpriv->nread) { dt282x_ai_cancel(dev, s); s->async->events |= COMEDI_CB_EOA; return; } #if 0 /* clear the dual dma flag, making this the last dma segment */ /* XXX probably wrong */ if (!devpriv->ntrig) { devpriv->supcsr &= ~(DT2821_DDMA); update_supcsr(0); } #endif /* restart the channel */ prep_ai_dma(dev, i, 0); } static int prep_ai_dma(struct comedi_device *dev, int dma_index, int n) { int dma_chan; unsigned long dma_ptr; unsigned long flags; if (!devpriv->ntrig) return 0; if (n == 0) n = devpriv->dma_maxsize; if (n > devpriv->ntrig * 2) n = devpriv->ntrig * 2; devpriv->ntrig -= n / 2; devpriv->dma[dma_index].size = n; dma_chan = devpriv->dma[dma_index].chan; dma_ptr = virt_to_bus(devpriv->dma[dma_index].buf); set_dma_mode(dma_chan, DMA_MODE_READ); flags = claim_dma_lock(); clear_dma_ff(dma_chan); set_dma_addr(dma_chan, dma_ptr); set_dma_count(dma_chan, n); release_dma_lock(flags); enable_dma(dma_chan); return n; } static int prep_ao_dma(struct comedi_device *dev, int dma_index, int n) { int dma_chan; unsigned long dma_ptr; unsigned long flags; devpriv->dma[dma_index].size = n; dma_chan = devpriv->dma[dma_index].chan; dma_ptr = virt_to_bus(devpriv->dma[dma_index].buf); set_dma_mode(dma_chan, DMA_MODE_WRITE); flags = claim_dma_lock(); clear_dma_ff(dma_chan); set_dma_addr(dma_chan, dma_ptr); set_dma_count(dma_chan, n); release_dma_lock(flags); enable_dma(dma_chan); return n; } static irqreturn_t dt282x_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s; struct comedi_subdevice *s_ao; unsigned int 
supcsr, adcsr, dacsr; int handled = 0; if (!dev->attached) { comedi_error(dev, "spurious interrupt"); return IRQ_HANDLED; } s = dev->subdevices + 0; s_ao = dev->subdevices + 1; adcsr = inw(dev->iobase + DT2821_ADCSR); dacsr = inw(dev->iobase + DT2821_DACSR); supcsr = inw(dev->iobase + DT2821_SUPCSR); if (supcsr & DT2821_DMAD) { if (devpriv->dma_dir == DMA_MODE_READ) dt282x_ai_dma_interrupt(dev); else dt282x_ao_dma_interrupt(dev); handled = 1; } if (adcsr & DT2821_ADERR) { if (devpriv->nread != 0) { comedi_error(dev, "A/D error"); dt282x_ai_cancel(dev, s); s->async->events |= COMEDI_CB_ERROR; } handled = 1; } if (dacsr & DT2821_DAERR) { #if 0 static int warn = 5; if (--warn <= 0) { disable_irq(dev->irq); printk(KERN_INFO "disabling irq\n"); } #endif comedi_error(dev, "D/A error"); dt282x_ao_cancel(dev, s_ao); s->async->events |= COMEDI_CB_ERROR; handled = 1; } #if 0 if (adcsr & DT2821_ADDONE) { int ret; short data; data = (short)inw(dev->iobase + DT2821_ADDAT); data &= (1 << boardtype.adbits) - 1; if (devpriv->ad_2scomp) data ^= 1 << (boardtype.adbits - 1); ret = comedi_buf_put(s->async, data); if (ret == 0) s->async->events |= COMEDI_CB_OVERFLOW; devpriv->nread--; if (!devpriv->nread) { s->async->events |= COMEDI_CB_EOA; } else { if (supcsr & DT2821_SCDN) update_supcsr(DT2821_STRIG); } handled = 1; } #endif comedi_event(dev, s); /* printk("adcsr=0x%02x dacsr-0x%02x supcsr=0x%02x\n", adcsr, dacsr, supcsr); */ return IRQ_RETVAL(handled); } static void dt282x_load_changain(struct comedi_device *dev, int n, unsigned int *chanlist) { unsigned int i; unsigned int chan, range; outw(DT2821_LLE | (n - 1), dev->iobase + DT2821_CHANCSR); for (i = 0; i < n; i++) { chan = CR_CHAN(chanlist[i]); range = CR_RANGE(chanlist[i]); update_adcsr((range << 4) | (chan)); } outw(n - 1, dev->iobase + DT2821_CHANCSR); } /* * Performs a single A/D conversion. 
 * - Put channel/gain into channel-gain list
 * - preload multiplexer
 * - trigger conversion and wait for it to finish
 */
/*
 * Single-shot analog input: software-triggered conversion(s), polled
 * completion (no interrupts, no DMA).  Returns the number of samples read.
 */
static int dt282x_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	int i;

	/* XXX should we really be enabling the ad clock here? */
	devpriv->adcsr = DT2821_ADCLK;
	update_adcsr(0);

	/* one-entry channel/gain list for the requested chanspec */
	dt282x_load_changain(dev, 1, &insn->chanspec);

	/* preload the multiplexer and wait for it to settle */
	update_supcsr(DT2821_PRLD);
	wait_for(!mux_busy(), comedi_error(dev, "timeout\n"); return -ETIME;);

	for (i = 0; i < insn->n; i++) {
		/* software trigger, then busy-wait for conversion done */
		update_supcsr(DT2821_STRIG);
		wait_for(ad_done(), comedi_error(dev, "timeout\n");
			 return -ETIME;);
		/* mask data register down to the board's ADC resolution */
		data[i] = inw(dev->iobase + DT2821_ADDAT) &
			  ((1 << boardtype.adbits) - 1);
		/* convert 2's complement to offset binary if configured */
		if (devpriv->ad_2scomp)
			data[i] ^= (1 << (boardtype.adbits - 1));
	}

	return i;
}

/*
 * Validate (and coerce) an analog-input async command, following the
 * standard five-step comedi cmdtest protocol.  Returns 0 when the
 * command is acceptable, or the step number (1-4) at which it failed.
 */
static int dt282x_ai_cmdtest(struct comedi_device *dev,
			     struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;

	/* step 1: make sure trigger sources are trivially valid */

	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_EXT;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_TIMER;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/*
	 * step 2: make sure trigger sources are unique
	 * and mutually compatible
	 */

	/* note that mutual compatibility is not an issue here */
	if (cmd->scan_begin_src != TRIG_FOLLOW &&
	    cmd->scan_begin_src != TRIG_EXT)
		err++;
	if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
		err++;

	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */

	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}
	if (cmd->scan_begin_src == TRIG_FOLLOW) {
		/* internal trigger */
		if (cmd->scan_begin_arg != 0) {
			cmd->scan_begin_arg = 0;
			err++;
		}
	} else {
		/* external trigger */
		/* should be level/edge, hi/lo specification here */
		if (cmd->scan_begin_arg != 0) {
			cmd->scan_begin_arg = 0;
			err++;
		}
	}
	if (cmd->convert_arg < 4000) {
		/* XXX board dependent */
		cmd->convert_arg = 4000;
		err++;
	}
/* slowest programmable rate: max prescale (2^15) * max divider (255) * 250ns */
#define SLOWEST_TIMER	(250*(1<<15)*255)
	if (cmd->convert_arg > SLOWEST_TIMER) {
		cmd->convert_arg = SLOWEST_TIMER;
		err++;
	}
	if (cmd->convert_arg < this_board->ai_speed) {
		cmd->convert_arg = this_board->ai_speed;
		err++;
	}
	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}
	if (cmd->stop_src == TRIG_COUNT) {
		/* any count is allowed */
	} else {
		/* TRIG_NONE */
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
	}

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	tmp = cmd->convert_arg;
	dt282x_ns_to_timer(&cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK);
	if (tmp != cmd->convert_arg)
		err++;

	if (err)
		return 4;

	return 0;
}

/*
 * Start a DMA-driven analog input acquisition previously validated by
 * dt282x_ai_cmdtest().  Requires both DMA channels; data is collected
 * by the interrupt handler via the two ping-pong DMA buffers.
 */
static int dt282x_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;
	int timer;

	if (devpriv->usedma == 0) {
		comedi_error(dev,
			     "driver requires 2 dma channels"
			     " to execute command");
		return -EIO;
	}

	dt282x_disable_dma(dev);

	/* clamp again defensively; cmdtest should already have done this */
	if (cmd->convert_arg < this_board->ai_speed)
		cmd->convert_arg = this_board->ai_speed;
	timer = dt282x_ns_to_timer(&cmd->convert_arg, TRIG_ROUND_NEAREST);
	outw(timer, dev->iobase + DT2821_TMRCTR);

	if (cmd->scan_begin_src == TRIG_FOLLOW) {
		/* internal trigger */
		devpriv->supcsr = DT2821_ERRINTEN | DT2821_DS0;
	} else {
		/* external trigger */
		devpriv->supcsr = DT2821_ERRINTEN | DT2821_DS0 | DT2821_DS1;
	}
	update_supcsr(DT2821_CLRDMADNE | DT2821_BUFFB | DT2821_ADCINIT);

	/* total conversions; 0 when stop_src == TRIG_NONE (stop_arg == 0) */
	devpriv->ntrig = cmd->stop_arg * cmd->scan_end_arg;
	devpriv->nread = devpriv->ntrig;

	devpriv->dma_dir = DMA_MODE_READ;
	devpriv->current_dma_index = 0;
	prep_ai_dma(dev, 0, 0);
	if (devpriv->ntrig) {
		/* second ping-pong buffer + dual-DMA only for bounded runs */
		prep_ai_dma(dev, 1, 0);
		devpriv->supcsr |= DT2821_DDMA;
		update_supcsr(0);
	}

	devpriv->adcsr = 0;

	dt282x_load_changain(dev, cmd->chanlist_len, cmd->chanlist);

	devpriv->adcsr = DT2821_ADCLK | DT2821_IADDONE;
	update_adcsr(0);

	/* preload the mux and wait for it before arming the trigger */
	update_supcsr(DT2821_PRLD);
	wait_for(!mux_busy(), comedi_error(dev, "timeout\n"); return -ETIME;);

	if (cmd->scan_begin_src == TRIG_FOLLOW) {
		update_supcsr(DT2821_STRIG);
	} else {
		devpriv->supcsr |= DT2821_XTRIG;
		update_supcsr(0);
	}

	return 0;
}

/* Stop both DMA channels (no-op when the driver runs without DMA). */
static void dt282x_disable_dma(struct comedi_device *dev)
{
	if (devpriv->usedma) {
		disable_dma(devpriv->dma[0].chan);
		disable_dma(devpriv->dma[1].chan);
	}
}

/* Cancel a running AI command: kill DMA and reinitialize the ADC. */
static int dt282x_ai_cancel(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	dt282x_disable_dma(dev);

	devpriv->adcsr = 0;
	update_adcsr(0);

	devpriv->supcsr = 0;
	update_supcsr(DT2821_ADCINIT);

	return 0;
}

/*
 * Convert a period in nanoseconds to the DT2821 timer register encoding
 * (prescale in bits 8-15, inverted divider in bits 0-7; 250ns base clock).
 * *nanosec is rounded per round_mode and updated to the achieved period.
 * NOTE(review): prescale value 1 is skipped — presumably reserved by the
 * hardware; confirm against the DT2821 timer documentation.
 */
static int dt282x_ns_to_timer(int *nanosec, int round_mode)
{
	int prescale, base, divider;

	for (prescale = 0; prescale < 16; prescale++) {
		if (prescale == 1)
			continue;
		base = 250 * (1 << prescale);
		switch (round_mode) {
		case TRIG_ROUND_NEAREST:
		default:
			divider = (*nanosec + base / 2) / base;
			break;
		case TRIG_ROUND_DOWN:
			divider = (*nanosec) / base;
			break;
		case TRIG_ROUND_UP:
			divider = (*nanosec + base - 1) / base;
			break;
		}
		if (divider < 256) {
			*nanosec = divider * base;
			return (prescale << 8) | (255 - divider);
		}
	}
	/* request was slower than the slowest rate; clamp to maximum */
	base = 250 * (1 << 15);
	divider = 255;
	*nanosec = divider * base;
	return (15 << 8) | (255 - divider);
}

/*
 * Analog output routine.  Selects single channel conversion,
 * selects correct channel, converts from 2's compliment to
 * offset binary if necessary, loads the data into the DAC
 * data register, and performs the conversion.
*/ static int dt282x_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[0] = devpriv->ao[CR_CHAN(insn->chanspec)]; return 1; } static int dt282x_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { short d; unsigned int chan; chan = CR_CHAN(insn->chanspec); d = data[0]; d &= (1 << boardtype.dabits) - 1; devpriv->ao[chan] = d; devpriv->dacsr |= DT2821_SSEL; if (chan) { /* select channel */ devpriv->dacsr |= DT2821_YSEL; if (devpriv->da0_2scomp) d ^= (1 << (boardtype.dabits - 1)); } else { devpriv->dacsr &= ~DT2821_YSEL; if (devpriv->da1_2scomp) d ^= (1 << (boardtype.dabits - 1)); } update_dacsr(0); outw(d, dev->iobase + DT2821_DADAT); update_supcsr(DT2821_DACON); return 1; } static int dt282x_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_INT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_NOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* * step 2: make sure trigger sources are unique * and mutually compatible */ /* note that mutual compatibility is not an issue here */ if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_arg < 5000 /* XXX unknown */) { cmd->scan_begin_arg = 
5000; err++; } if (cmd->convert_arg != 0) { cmd->convert_arg = 0; err++; } if (cmd->scan_end_arg > 2) { /* XXX chanlist stuff? */ cmd->scan_end_arg = 2; err++; } if (cmd->stop_src == TRIG_COUNT) { /* any count is allowed */ } else { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up any arguments */ tmp = cmd->scan_begin_arg; dt282x_ns_to_timer(&cmd->scan_begin_arg, cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->scan_begin_arg) err++; if (err) return 4; return 0; } static int dt282x_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int x) { int size; if (x != 0) return -EINVAL; size = cfc_read_array_from_buffer(s, devpriv->dma[0].buf, devpriv->dma_maxsize); if (size == 0) { printk(KERN_ERR "dt282x: AO underrun\n"); return -EPIPE; } prep_ao_dma(dev, 0, size); size = cfc_read_array_from_buffer(s, devpriv->dma[1].buf, devpriv->dma_maxsize); if (size == 0) { printk(KERN_ERR "dt282x: AO underrun\n"); return -EPIPE; } prep_ao_dma(dev, 1, size); update_supcsr(DT2821_STRIG); s->async->inttrig = NULL; return 1; } static int dt282x_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int timer; struct comedi_cmd *cmd = &s->async->cmd; if (devpriv->usedma == 0) { comedi_error(dev, "driver requires 2 dma channels" " to execute command"); return -EIO; } dt282x_disable_dma(dev); devpriv->supcsr = DT2821_ERRINTEN | DT2821_DS1 | DT2821_DDMA; update_supcsr(DT2821_CLRDMADNE | DT2821_BUFFB | DT2821_DACINIT); devpriv->ntrig = cmd->stop_arg * cmd->chanlist_len; devpriv->nread = devpriv->ntrig; devpriv->dma_dir = DMA_MODE_WRITE; devpriv->current_dma_index = 0; timer = dt282x_ns_to_timer(&cmd->scan_begin_arg, TRIG_ROUND_NEAREST); outw(timer, dev->iobase + DT2821_TMRCTR); devpriv->dacsr = DT2821_SSEL | DT2821_DACLK | DT2821_IDARDY; update_dacsr(0); s->async->inttrig = dt282x_ao_inttrig; return 0; } static int dt282x_ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { 
dt282x_disable_dma(dev); devpriv->dacsr = 0; update_dacsr(0); devpriv->supcsr = 0; update_supcsr(DT2821_DACINIT); return 0; } static int dt282x_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { if (data[0]) { s->state &= ~data[0]; s->state |= (data[0] & data[1]); outw(s->state, dev->iobase + DT2821_DIODAT); } data[1] = inw(dev->iobase + DT2821_DIODAT); return 2; } static int dt282x_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int mask; mask = (CR_CHAN(insn->chanspec) < 8) ? 0x00ff : 0xff00; if (data[0]) s->io_bits |= mask; else s->io_bits &= ~mask; if (s->io_bits & 0x00ff) devpriv->dacsr |= DT2821_LBOE; else devpriv->dacsr &= ~DT2821_LBOE; if (s->io_bits & 0xff00) devpriv->dacsr |= DT2821_HBOE; else devpriv->dacsr &= ~DT2821_HBOE; outw(devpriv->dacsr, dev->iobase + DT2821_DACSR); return 1; } static const struct comedi_lrange *const ai_range_table[] = { &range_dt282x_ai_lo_bipolar, &range_dt282x_ai_lo_unipolar, &range_dt282x_ai_5_bipolar, &range_dt282x_ai_5_unipolar }; static const struct comedi_lrange *const ai_range_pgl_table[] = { &range_dt282x_ai_hi_bipolar, &range_dt282x_ai_hi_unipolar }; static const struct comedi_lrange *opt_ai_range_lkup(int ispgl, int x) { if (ispgl) { if (x < 0 || x >= 2) x = 0; return ai_range_pgl_table[x]; } else { if (x < 0 || x >= 4) x = 0; return ai_range_table[x]; } } static const struct comedi_lrange *const ao_range_table[] = { &range_bipolar10, &range_unipolar10, &range_bipolar5, &range_unipolar5, &range_bipolar2_5 }; static const struct comedi_lrange *opt_ao_range_lkup(int x) { if (x < 0 || x >= 5) x = 0; return ao_range_table[x]; } enum { /* i/o base, irq, dma channels */ opt_iobase = 0, opt_irq, opt_dma1, opt_dma2, opt_diff, /* differential */ opt_ai_twos, opt_ao0_twos, opt_ao1_twos, /* twos comp */ opt_ai_range, opt_ao0_range, opt_ao1_range, /* range */ }; /* options: 0 i/o base 1 irq 2 
	dma1
   3	dma2
   4	0=single ended, 1=differential
   5	ai 0=straight binary, 1=2's comp
   6	ao0 0=straight binary, 1=2's comp
   7	ao1 0=straight binary, 1=2's comp
   8	ai 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V
   9	ao0 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
   10	ao1 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
 */
/*
 * Probe and initialize a DT282x board: claim the I/O region, fingerprint
 * the registers, hook the IRQ and DMA channels, and register the AI, AO
 * and DIO subdevices.  On failure the comedi core is expected to call
 * dt282x_detach(), which releases whatever was acquired here.
 */
static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	int i, irq;
	int ret;
	struct comedi_subdevice *s;
	unsigned long iobase;

	dev->board_name = this_board->name;

	iobase = it->options[opt_iobase];
	if (!iobase)
		iobase = 0x240;	/* factory-default base address */

	printk(KERN_INFO "comedi%d: dt282x: 0x%04lx", dev->minor, iobase);
	if (!request_region(iobase, DT2821_SIZE, "dt282x")) {
		printk(KERN_INFO " I/O port conflict\n");
		return -EBUSY;
	}
	dev->iobase = iobase;

	/* reset the board, then read ADCSR (value itself is discarded) */
	outw(DT2821_BDINIT, dev->iobase + DT2821_SUPCSR);
	i = inw(dev->iobase + DT2821_ADCSR);
#ifdef DEBUG
	printk(KERN_DEBUG " fingerprint=%x,%x,%x,%x,%x",
	       inw(dev->iobase + DT2821_ADCSR),
	       inw(dev->iobase + DT2821_CHANCSR),
	       inw(dev->iobase + DT2821_DACSR),
	       inw(dev->iobase + DT2821_SUPCSR),
	       inw(dev->iobase + DT2821_TMRCTR));
#endif

	/* fingerprint the post-reset register values to confirm hardware */
	if (((inw(dev->iobase + DT2821_ADCSR) & DT2821_ADCSR_MASK)
	     != DT2821_ADCSR_VAL) ||
	    ((inw(dev->iobase + DT2821_CHANCSR) & DT2821_CHANCSR_MASK)
	     != DT2821_CHANCSR_VAL) ||
	    ((inw(dev->iobase + DT2821_DACSR) & DT2821_DACSR_MASK)
	     != DT2821_DACSR_VAL) ||
	    ((inw(dev->iobase + DT2821_SUPCSR) & DT2821_SUPCSR_MASK)
	     != DT2821_SUPCSR_VAL) ||
	    ((inw(dev->iobase + DT2821_TMRCTR) & DT2821_TMRCTR_MASK)
	     != DT2821_TMRCTR_VAL)) {
		printk(KERN_ERR " board not found");
		return -EIO;
	}
	/* should do board test */

	irq = it->options[opt_irq];
#if 0
	if (irq < 0) {
		unsigned long flags;
		int irqs;

		save_flags(flags);
		sti();
		irqs = probe_irq_on();
		/* trigger interrupt */
		udelay(100);
		irq = probe_irq_off(irqs);
		restore_flags(flags);
		if (0 /* error */)
			printk(KERN_ERR " error probing irq (bad)");
	}
#endif
	if (irq > 0) {
		printk(KERN_INFO " ( irq = %d )", irq);
		ret = request_irq(irq, dt282x_interrupt, 0, "dt282x", dev);
		if (ret < 0) {
			printk(KERN_ERR " failed to get irq\n");
			return -EIO;
		}
		dev->irq = irq;
	} else if (irq == 0) {
		printk(KERN_INFO " (no irq)");
	} else {
#if 0
		printk(KERN_INFO " (probe returned multiple irqs--bad)");
#else
		printk(KERN_INFO " (irq probe not implemented)");
#endif
	}

	ret = alloc_private(dev, sizeof(struct dt282x_private));
	if (ret < 0)
		return ret;

	/* without both DMA channels the async commands are unavailable */
	ret = dt282x_grab_dma(dev, it->options[opt_dma1],
			      it->options[opt_dma2]);
	if (ret < 0)
		return ret;

	ret = alloc_subdevices(dev, 3);
	if (ret < 0)
		return ret;

	s = dev->subdevices + 0;

	dev->read_subdev = s;
	/* ai subdevice */
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_CMD_READ |
	    ((it->options[opt_diff]) ? SDF_DIFF : SDF_COMMON);
	s->n_chan =
	    (it->options[opt_diff]) ? boardtype.adchan_di : boardtype.adchan_se;
	s->insn_read = dt282x_ai_insn_read;
	s->do_cmdtest = dt282x_ai_cmdtest;
	s->do_cmd = dt282x_ai_cmd;
	s->cancel = dt282x_ai_cancel;
	s->maxdata = (1 << boardtype.adbits) - 1;
	s->len_chanlist = 16;
	s->range_table =
	    opt_ai_range_lkup(boardtype.ispgl, it->options[opt_ai_range]);
	devpriv->ad_2scomp = it->options[opt_ai_twos];

	s++;
	s->n_chan = boardtype.dachan;
	if (s->n_chan) {
		/* ao subsystem */
		s->type = COMEDI_SUBD_AO;
		dev->write_subdev = s;
		s->subdev_flags = SDF_WRITABLE | SDF_CMD_WRITE;
		s->insn_read = dt282x_ao_insn_read;
		s->insn_write = dt282x_ao_insn_write;
		s->do_cmdtest = dt282x_ao_cmdtest;
		s->do_cmd = dt282x_ao_cmd;
		s->cancel = dt282x_ao_cancel;
		s->maxdata = (1 << boardtype.dabits) - 1;
		s->len_chanlist = 2;
		s->range_table_list = devpriv->darangelist;
		/* per-channel range tables: [0] = DAC0, [1] = DAC1 */
		devpriv->darangelist[0] =
		    opt_ao_range_lkup(it->options[opt_ao0_range]);
		devpriv->darangelist[1] =
		    opt_ao_range_lkup(it->options[opt_ao1_range]);
		devpriv->da0_2scomp = it->options[opt_ao0_twos];
		devpriv->da1_2scomp = it->options[opt_ao1_twos];
	} else {
		s->type = COMEDI_SUBD_UNUSED;
	}

	s++;
	/* dio subsystem */
	s->type = COMEDI_SUBD_DIO;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	s->n_chan = 16;
	s->insn_bits = dt282x_dio_insn_bits;
	s->insn_config = dt282x_dio_insn_config;
	s->maxdata = 1;
	s->range_table = &range_digital;

	printk(KERN_INFO "\n");

	return 0;
}

/*
 * Release everything dt282x_attach() may have acquired; each resource is
 * checked individually so this is safe after a partial attach.
 */
static void free_resources(struct comedi_device *dev)
{
	if (dev->irq)
		free_irq(dev->irq, dev);
	if (dev->iobase)
		release_region(dev->iobase, DT2821_SIZE);
	if (dev->private) {
		if (devpriv->dma[0].chan)
			free_dma(devpriv->dma[0].chan);
		if (devpriv->dma[1].chan)
			free_dma(devpriv->dma[1].chan);
		if (devpriv->dma[0].buf)
			free_page((unsigned long)devpriv->dma[0].buf);
		if (devpriv->dma[1].buf)
			free_page((unsigned long)devpriv->dma[1].buf);
	}
}

/* Driver detach hook: just tear down whatever attach set up. */
static int dt282x_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: dt282x: remove\n", dev->minor);

	free_resources(dev);

	return 0;
}

/*
 * Claim the two 16-bit ISA DMA channels (valid channels are 5-7, and the
 * pair is normalized so dma[0] is the lower-numbered one) plus one DMA
 * buffer page each.  Running without DMA (both options zero) is allowed
 * but disables async commands.  Partially-acquired resources are left for
 * free_resources() to release.
 */
static int dt282x_grab_dma(struct comedi_device *dev, int dma1, int dma2)
{
	int ret;

	devpriv->usedma = 0;

	if (!dma1 && !dma2) {
		printk(KERN_ERR " (no dma)");
		return 0;
	}

	if (dma1 == dma2 || dma1 < 5 || dma2 < 5 || dma1 > 7 || dma2 > 7)
		return -EINVAL;

	if (dma2 < dma1) {
		int i;
		i = dma1;
		dma1 = dma2;
		dma2 = i;
	}

	ret = request_dma(dma1, "dt282x A");
	if (ret)
		return -EBUSY;
	devpriv->dma[0].chan = dma1;

	ret = request_dma(dma2, "dt282x B");
	if (ret)
		return -EBUSY;
	devpriv->dma[1].chan = dma2;

	devpriv->dma_maxsize = PAGE_SIZE;
	devpriv->dma[0].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
	devpriv->dma[1].buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
	if (!devpriv->dma[0].buf || !devpriv->dma[1].buf) {
		printk(KERN_ERR " can't get DMA memory");
		return -ENOMEM;
	}

	printk(KERN_INFO " (dma=%d,%d)", dma1, dma2);

	devpriv->usedma = 1;

	return 0;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
zf2-laser-dev/android_kernel_asus_Z00E
arch/arm/mach-pxa/icontrol.c
2178
4945
/*
 * linux/arch/arm/mach-pxa/icontrol.c
 *
 * Support for the iControl and SafeTcam platforms from TMT Services
 * using the Embedian MXM-8x10 Computer on Module
 *
 * Copyright (C) 2009 TMT Services & Supplies (Pty) Ltd.
 *
 * 2010-01-21 Hennie van der Merve <hvdmerwe@tmtservies.co.za>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>

#include <asm/mach-types.h>
#include <asm/mach/arch.h>

#include <mach/pxa320.h>
#include <mach/mxm8x10.h>

#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/can/platform/mcp251x.h>

#include "generic.h"

/* GPIO chip-select and interrupt lines for the four MCP2515 CAN chips */
#define ICONTROL_MCP251x_nCS1	(15)
#define ICONTROL_MCP251x_nCS2	(16)
#define ICONTROL_MCP251x_nCS3	(17)
#define ICONTROL_MCP251x_nCS4	(24)

#define ICONTROL_MCP251x_nIRQ1	(74)
#define ICONTROL_MCP251x_nIRQ2	(75)
#define ICONTROL_MCP251x_nIRQ3	(76)
#define ICONTROL_MCP251x_nIRQ4	(77)

/* Per-chip SSP configuration; identical except for the GPIO chip select */
static struct pxa2xx_spi_chip mcp251x_chip_info1 = {
	.tx_threshold   = 8,
	.rx_threshold   = 128,
	.dma_burst_size = 8,
	.timeout        = 235,
	.gpio_cs        = ICONTROL_MCP251x_nCS1
};

static struct pxa2xx_spi_chip mcp251x_chip_info2 = {
	.tx_threshold   = 8,
	.rx_threshold   = 128,
	.dma_burst_size = 8,
	.timeout        = 235,
	.gpio_cs        = ICONTROL_MCP251x_nCS2
};

static struct pxa2xx_spi_chip mcp251x_chip_info3 = {
	.tx_threshold   = 8,
	.rx_threshold   = 128,
	.dma_burst_size = 8,
	.timeout        = 235,
	.gpio_cs        = ICONTROL_MCP251x_nCS3
};

static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
	.tx_threshold   = 8,
	.rx_threshold   = 128,
	.dma_burst_size = 8,
	.timeout        = 235,
	.gpio_cs        = ICONTROL_MCP251x_nCS4
};

static struct mcp251x_platform_data mcp251x_info = {
	/*
	 * 16 MHz crystal.  Use an integer literal: the field is an
	 * integer Hz value, and the previous "16E6" was a double
	 * constant silently converted at initialization.
	 */
	.oscillator_frequency  = 16000000,
	.board_specific_setup  = NULL,
	.power_enable          = NULL,
	.transceiver_enable    = NULL
};

/* Four MCP2515 controllers: two on SSP3 (bus 3), two on SSP4 (bus 4) */
static struct spi_board_info mcp251x_board_info[] = {
	{
		.modalias        = "mcp2515",
		.max_speed_hz    = 6500000,
		.bus_num         = 3,
		.chip_select     = 0,
		.platform_data   = &mcp251x_info,
		.controller_data = &mcp251x_chip_info1,
		.irq             = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ1)
	},
	{
		.modalias        = "mcp2515",
		.max_speed_hz    = 6500000,
		.bus_num         = 3,
		.chip_select     = 1,
		.platform_data   = &mcp251x_info,
		.controller_data = &mcp251x_chip_info2,
		.irq             = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ2)
	},
	{
		.modalias        = "mcp2515",
		.max_speed_hz    = 6500000,
		.bus_num         = 4,
		.chip_select     = 0,
		.platform_data   = &mcp251x_info,
		.controller_data = &mcp251x_chip_info3,
		.irq             = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ3)
	},
	{
		.modalias        = "mcp2515",
		.max_speed_hz    = 6500000,
		.bus_num         = 4,
		.chip_select     = 1,
		.platform_data   = &mcp251x_info,
		.controller_data = &mcp251x_chip_info4,
		.irq             = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ4)
	}
};

static struct pxa2xx_spi_master pxa_ssp3_spi_master_info = {
	.clock_enable   = CKEN_SSP3,
	.num_chipselect = 2,
	.enable_dma     = 1
};

static struct pxa2xx_spi_master pxa_ssp4_spi_master_info = {
	.clock_enable   = CKEN_SSP4,
	.num_chipselect = 2,
	.enable_dma     = 1
};

struct platform_device pxa_spi_ssp3 = {
	.name = "pxa2xx-spi",
	.id   = 3,
	.dev = {
		.platform_data = &pxa_ssp3_spi_master_info,
	}
};

struct platform_device pxa_spi_ssp4 = {
	.name = "pxa2xx-spi",
	.id   = 4,
	.dev = {
		.platform_data = &pxa_ssp4_spi_master_info,
	}
};

static struct platform_device *icontrol_spi_devices[] __initdata = {
	&pxa_spi_ssp3,
	&pxa_spi_ssp4,
};

/* Pin configuration: CS and SPI pins, plus edge-triggered CAN interrupts */
static mfp_cfg_t mfp_can_cfg[] __initdata = {
	/* CAN CS lines */
	GPIO15_GPIO,
	GPIO16_GPIO,
	GPIO17_GPIO,
	GPIO24_GPIO,

	/* SPI (SSP3) lines */
	GPIO89_SSP3_SCLK,
	GPIO91_SSP3_TXD,
	GPIO92_SSP3_RXD,

	/* SPI (SSP4) lines */
	GPIO93_SSP4_SCLK,
	GPIO95_SSP4_TXD,
	GPIO96_SSP4_RXD,

	/* CAN nIRQ lines */
	GPIO74_GPIO | MFP_LPM_EDGE_RISE,
	GPIO75_GPIO | MFP_LPM_EDGE_RISE,
	GPIO76_GPIO | MFP_LPM_EDGE_RISE,
	GPIO77_GPIO | MFP_LPM_EDGE_RISE
};

/* Configure pins, register both SSP masters, then the four CAN chips. */
static void __init icontrol_can_init(void)
{
	pxa3xx_mfp_config(ARRAY_AND_SIZE(mfp_can_cfg));
	platform_add_devices(ARRAY_AND_SIZE(icontrol_spi_devices));
	spi_register_board_info(ARRAY_AND_SIZE(mcp251x_board_info));
}

/* Board init: bring up the MXM-8x10 CoM basics, then the CAN bus. */
static void __init icontrol_init(void)
{
	mxm_8x10_barebones_init();
	mxm_8x10_usb_host_init();
	mxm_8x10_mmc_init();

	icontrol_can_init();
}

MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
	.atag_offset	= 0x100,
	.map_io		= pxa3xx_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= pxa3xx_init_irq,
	.handle_irq	= pxa3xx_handle_irq,
	.init_time	= pxa_timer_init,
	.init_machine	= icontrol_init,
	.restart	= pxa_restart,
MACHINE_END
gpl-2.0
ashhher3/linux
fs/squashfs/lzo_wrapper.c
2178
3122
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2010 LG Electronics
 * Chan Jeong <chan.jeong@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * lzo_wrapper.c
 */

#include <linux/mutex.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/lzo.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"

/* Per-mount LZO workspace: one input and one output staging buffer. */
struct squashfs_lzo {
	void	*input;
	void	*output;
};

/*
 * Allocate the LZO workspace.  Both staging buffers are sized to hold a
 * full data block (or a metadata block, whichever is larger).  Returns
 * the workspace or ERR_PTR(-ENOMEM).
 */
static void *lzo_init(struct squashfs_sb_info *msblk, void *buff)
{
	int bufsz = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
	struct squashfs_lzo *wksp;

	wksp = kzalloc(sizeof(*wksp), GFP_KERNEL);
	if (wksp == NULL)
		goto err;
	wksp->input = vmalloc(bufsz);
	if (wksp->input == NULL)
		goto err;
	wksp->output = vmalloc(bufsz);
	if (wksp->output == NULL)
		goto err_input;

	return wksp;

err_input:
	vfree(wksp->input);
err:
	ERROR("Failed to allocate lzo workspace\n");
	kfree(wksp);
	return ERR_PTR(-ENOMEM);
}

/* Free a workspace previously returned by lzo_init() (NULL is allowed). */
static void lzo_free(void *strm)
{
	struct squashfs_lzo *wksp = strm;

	if (wksp) {
		vfree(wksp->input);
		vfree(wksp->output);
	}
	kfree(wksp);
}

/*
 * Decompress one block: gather the compressed bytes from the buffer
 * heads into the input staging buffer, decompress into the output
 * staging buffer, then scatter the result into the page actor.
 * Returns the decompressed length, or -EIO on corrupt data.
 */
static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
	struct buffer_head **bh, int b, int offset, int length,
	struct squashfs_page_actor *output)
{
	struct squashfs_lzo *wksp = strm;
	size_t decomp_len = output->length;
	void *dst = wksp->input;
	void *page;
	int k, chunk, remaining, err;

	/* gather: first buffer head starts at 'offset', the rest at 0 */
	for (k = 0, remaining = length; k < b; k++) {
		chunk = min(remaining, msblk->devblksize - offset);
		memcpy(dst, bh[k]->b_data + offset, chunk);
		dst += chunk;
		remaining -= chunk;
		offset = 0;
		put_bh(bh[k]);
	}

	err = lzo1x_decompress_safe(wksp->input, (size_t)length,
		wksp->output, &decomp_len);
	if (err != LZO_E_OK)
		return -EIO;

	/* scatter: copy page-sized pieces into the actor's pages */
	remaining = (int)decomp_len;
	dst = wksp->output;
	page = squashfs_first_page(output);
	while (page) {
		if (remaining <= PAGE_CACHE_SIZE) {
			memcpy(page, dst, remaining);
			break;
		}
		memcpy(page, dst, PAGE_CACHE_SIZE);
		dst += PAGE_CACHE_SIZE;
		remaining -= PAGE_CACHE_SIZE;
		page = squashfs_next_page(output);
	}
	squashfs_finish_page(output);

	return (int)decomp_len;
}

const struct squashfs_decompressor squashfs_lzo_comp_ops = {
	.init = lzo_init,
	.free = lzo_free,
	.decompress = lzo_uncompress,
	.id = LZO_COMPRESSION,
	.name = "lzo",
	.supported = 1
};
gpl-2.0
viaembedded/springboard-kernel-bsp
drivers/pcmcia/rsrc_nonstatic.c
2690
30445
/* * rsrc_nonstatic.c -- Resource management routines for !SS_CAP_STATIC_MAP sockets * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/timer.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/io.h> #include <asm/irq.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> #include "cs_internal.h" /* moved to rsrc_mgr.c MODULE_AUTHOR("David A. Hinds, Dominik Brodowski"); MODULE_LICENSE("GPL"); */ /* Parameters that can be set with 'insmod' */ #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444) INT_MODULE_PARM(probe_mem, 1); /* memory probe? */ #ifdef CONFIG_PCMCIA_PROBE INT_MODULE_PARM(probe_io, 1); /* IO port probe? 
*/ INT_MODULE_PARM(mem_limit, 0x10000); #endif /* for io_db and mem_db */ struct resource_map { u_long base, num; struct resource_map *next; }; struct socket_data { struct resource_map mem_db; struct resource_map mem_db_valid; struct resource_map io_db; }; #define MEM_PROBE_LOW (1 << 0) #define MEM_PROBE_HIGH (1 << 1) /* Action field */ #define REMOVE_MANAGED_RESOURCE 1 #define ADD_MANAGED_RESOURCE 2 /*====================================================================== Linux resource management extensions ======================================================================*/ static struct resource * claim_region(struct pcmcia_socket *s, resource_size_t base, resource_size_t size, int type, char *name) { struct resource *res, *parent; parent = type & IORESOURCE_MEM ? &iomem_resource : &ioport_resource; res = pcmcia_make_resource(base, size, type | IORESOURCE_BUSY, name); if (res) { #ifdef CONFIG_PCI if (s && s->cb_dev) parent = pci_find_parent_resource(s->cb_dev, res); #endif if (!parent || request_resource(parent, res)) { kfree(res); res = NULL; } } return res; } static void free_region(struct resource *res) { if (res) { release_resource(res); kfree(res); } } /*====================================================================== These manage the internal databases of available resources. 
======================================================================*/ static int add_interval(struct resource_map *map, u_long base, u_long num) { struct resource_map *p, *q; for (p = map; ; p = p->next) { if ((p != map) && (p->base+p->num >= base)) { p->num = max(num + base - p->base, p->num); return 0; } if ((p->next == map) || (p->next->base > base+num-1)) break; } q = kmalloc(sizeof(struct resource_map), GFP_KERNEL); if (!q) { printk(KERN_WARNING "out of memory to update resources\n"); return -ENOMEM; } q->base = base; q->num = num; q->next = p->next; p->next = q; return 0; } /*====================================================================*/ static int sub_interval(struct resource_map *map, u_long base, u_long num) { struct resource_map *p, *q; for (p = map; ; p = q) { q = p->next; if (q == map) break; if ((q->base+q->num > base) && (base+num > q->base)) { if (q->base >= base) { if (q->base+q->num <= base+num) { /* Delete whole block */ p->next = q->next; kfree(q); /* don't advance the pointer yet */ q = p; } else { /* Cut off bit from the front */ q->num = q->base + q->num - base - num; q->base = base + num; } } else if (q->base+q->num <= base+num) { /* Cut off bit from the end */ q->num = base - q->base; } else { /* Split the block into two pieces */ p = kmalloc(sizeof(struct resource_map), GFP_KERNEL); if (!p) { printk(KERN_WARNING "out of memory to update resources\n"); return -ENOMEM; } p->base = base+num; p->num = q->base+q->num - p->base; q->num = base - q->base; p->next = q->next ; q->next = p; } } } return 0; } /*====================================================================== These routines examine a region of IO or memory addresses to determine what ranges might be genuinely available. 
======================================================================*/ #ifdef CONFIG_PCMCIA_PROBE static void do_io_probe(struct pcmcia_socket *s, unsigned int base, unsigned int num) { struct resource *res; struct socket_data *s_data = s->resource_data; unsigned int i, j, bad; int any; u_char *b, hole, most; dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:", base, base+num-1); /* First, what does a floating port look like? */ b = kzalloc(256, GFP_KERNEL); if (!b) { printk("\n"); dev_printk(KERN_ERR, &s->dev, "do_io_probe: unable to kmalloc 256 bytes"); return; } for (i = base, most = 0; i < base+num; i += 8) { res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); if (!res) continue; hole = inb(i); for (j = 1; j < 8; j++) if (inb(i+j) != hole) break; free_region(res); if ((j == 8) && (++b[hole] > b[most])) most = hole; if (b[most] == 127) break; } kfree(b); bad = any = 0; for (i = base; i < base+num; i += 8) { res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); if (!res) { if (!any) printk(" excluding"); if (!bad) bad = any = i; continue; } for (j = 0; j < 8; j++) if (inb(i+j) != most) break; free_region(res); if (j < 8) { if (!any) printk(" excluding"); if (!bad) bad = any = i; } else { if (bad) { sub_interval(&s_data->io_db, bad, i-bad); printk(" %#x-%#x", bad, i-1); bad = 0; } } } if (bad) { if ((num > 16) && (bad == base) && (i == base+num)) { sub_interval(&s_data->io_db, bad, i-bad); printk(" nothing: probe failed.\n"); return; } else { sub_interval(&s_data->io_db, bad, i-bad); printk(" %#x-%#x", bad, i-1); } } printk(any ? 
"\n" : " clean.\n"); } #endif /*======================================================================*/ /** * readable() - iomem validation function for cards with a valid CIS */ static int readable(struct pcmcia_socket *s, struct resource *res, unsigned int *count) { int ret = -EINVAL; if (s->fake_cis) { dev_dbg(&s->dev, "fake CIS is being used: can't validate mem\n"); return 0; } s->cis_mem.res = res; s->cis_virt = ioremap(res->start, s->map_size); if (s->cis_virt) { mutex_unlock(&s->ops_mutex); /* as we're only called from pcmcia.c, we're safe */ if (s->callback->validate) ret = s->callback->validate(s, count); /* invalidate mapping */ mutex_lock(&s->ops_mutex); iounmap(s->cis_virt); s->cis_virt = NULL; } s->cis_mem.res = NULL; if ((ret) || (*count == 0)) return -EINVAL; return 0; } /** * checksum() - iomem validation function for simple memory cards */ static int checksum(struct pcmcia_socket *s, struct resource *res, unsigned int *value) { pccard_mem_map map; int i, a = 0, b = -1, d; void __iomem *virt; virt = ioremap(res->start, s->map_size); if (virt) { map.map = 0; map.flags = MAP_ACTIVE; map.speed = 0; map.res = res; map.card_start = 0; s->ops->set_mem_map(s, &map); /* Don't bother checking every word... */ for (i = 0; i < s->map_size; i += 44) { d = readl(virt+i); a += d; b &= d; } map.flags = 0; s->ops->set_mem_map(s, &map); iounmap(virt); } if (b == -1) return -EINVAL; *value = a; return 0; } /** * do_validate_mem() - low level validate a memory region for PCMCIA use * @s: PCMCIA socket to validate * @base: start address of resource to check * @size: size of resource to check * @validate: validation function to use * * do_validate_mem() splits up the memory region which is to be checked * into two parts. Both are passed to the @validate() function. If * @validate() returns non-zero, or the value parameter to @validate() * is zero, or the value parameter is different between both calls, * the check fails, and -EINVAL is returned. Else, 0 is returned. 
*/ static int do_validate_mem(struct pcmcia_socket *s, unsigned long base, unsigned long size, int validate (struct pcmcia_socket *s, struct resource *res, unsigned int *value)) { struct socket_data *s_data = s->resource_data; struct resource *res1, *res2; unsigned int info1 = 1, info2 = 1; int ret = -EINVAL; res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe"); res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, "PCMCIA memprobe"); if (res1 && res2) { ret = 0; if (validate) { ret = validate(s, res1, &info1); ret += validate(s, res2, &info2); } } free_region(res2); free_region(res1); dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %p %p %u %u %u", base, base+size-1, res1, res2, ret, info1, info2); if ((ret) || (info1 != info2) || (info1 == 0)) return -EINVAL; if (validate && !s->fake_cis) { /* move it to the validated data set */ add_interval(&s_data->mem_db_valid, base, size); sub_interval(&s_data->mem_db, base, size); } return 0; } /** * do_mem_probe() - validate a memory region for PCMCIA use * @s: PCMCIA socket to validate * @base: start address of resource to check * @num: size of resource to check * @validate: validation function to use * @fallback: validation function to use if validate fails * * do_mem_probe() checks a memory region for use by the PCMCIA subsystem. * To do so, the area is split up into sensible parts, and then passed * into the @validate() function. Only if @validate() and @fallback() fail, * the area is marked as unavaibale for use by the PCMCIA subsystem. The * function returns the size of the usable memory area. 
*/ static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num, int validate (struct pcmcia_socket *s, struct resource *res, unsigned int *value), int fallback (struct pcmcia_socket *s, struct resource *res, unsigned int *value)) { struct socket_data *s_data = s->resource_data; u_long i, j, bad, fail, step; dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:", base, base+num-1); bad = fail = 0; step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff); /* don't allow too large steps */ if (step > 0x800000) step = 0x800000; /* cis_readable wants to map 2x map_size */ if (step < 2 * s->map_size) step = 2 * s->map_size; for (i = j = base; i < base+num; i = j + step) { if (!fail) { for (j = i; j < base+num; j += step) { if (!do_validate_mem(s, j, step, validate)) break; } fail = ((i == base) && (j == base+num)); } if ((fail) && (fallback)) { for (j = i; j < base+num; j += step) if (!do_validate_mem(s, j, step, fallback)) break; } if (i != j) { if (!bad) printk(" excluding"); printk(" %#05lx-%#05lx", i, j-1); sub_interval(&s_data->mem_db, i, j-i); bad += j-i; } } printk(bad ? "\n" : " clean.\n"); return num - bad; } #ifdef CONFIG_PCMCIA_PROBE /** * inv_probe() - top-to-bottom search for one usuable high memory area * @s: PCMCIA socket to validate * @m: resource_map to check */ static u_long inv_probe(struct resource_map *m, struct pcmcia_socket *s) { struct socket_data *s_data = s->resource_data; u_long ok; if (m == &s_data->mem_db) return 0; ok = inv_probe(m->next, s); if (ok) { if (m->base >= 0x100000) sub_interval(&s_data->mem_db, m->base, m->num); return ok; } if (m->base < 0x100000) return 0; return do_mem_probe(s, m->base, m->num, readable, checksum); } /** * validate_mem() - memory probe function * @s: PCMCIA socket to validate * @probe_mask: MEM_PROBE_LOW | MEM_PROBE_HIGH * * The memory probe. 
If the memory list includes a 64K-aligned block * below 1MB, we probe in 64K chunks, and as soon as we accumulate at * least mem_limit free space, we quit. Returns 0 on usuable ports. */ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask) { struct resource_map *m, mm; static unsigned char order[] = { 0xd0, 0xe0, 0xc0, 0xf0 }; unsigned long b, i, ok = 0; struct socket_data *s_data = s->resource_data; /* We do up to four passes through the list */ if (probe_mask & MEM_PROBE_HIGH) { if (inv_probe(s_data->mem_db.next, s) > 0) return 0; if (s_data->mem_db_valid.next != &s_data->mem_db_valid) return 0; dev_printk(KERN_NOTICE, &s->dev, "cs: warning: no high memory space available!\n"); return -ENODEV; } for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) { mm = *m; /* Only probe < 1 MB */ if (mm.base >= 0x100000) continue; if ((mm.base | mm.num) & 0xffff) { ok += do_mem_probe(s, mm.base, mm.num, readable, checksum); continue; } /* Special probe for 64K-aligned block */ for (i = 0; i < 4; i++) { b = order[i] << 12; if ((b >= mm.base) && (b+0x10000 <= mm.base+mm.num)) { if (ok >= mem_limit) sub_interval(&s_data->mem_db, b, 0x10000); else ok += do_mem_probe(s, b, 0x10000, readable, checksum); } } } if (ok > 0) return 0; return -ENODEV; } #else /* CONFIG_PCMCIA_PROBE */ /** * validate_mem() - memory probe function * @s: PCMCIA socket to validate * @probe_mask: ignored * * Returns 0 on usuable ports. */ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask) { struct resource_map *m, mm; struct socket_data *s_data = s->resource_data; unsigned long ok = 0; for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) { mm = *m; ok += do_mem_probe(s, mm.base, mm.num, readable, checksum); } if (ok > 0) return 0; return -ENODEV; } #endif /* CONFIG_PCMCIA_PROBE */ /** * pcmcia_nonstatic_validate_mem() - try to validate iomem for PCMCIA use * @s: PCMCIA socket to validate * * This is tricky... 
when we set up CIS memory, we try to validate * the memory window space allocations. * * Locking note: Must be called with skt_mutex held! */ static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s) { struct socket_data *s_data = s->resource_data; unsigned int probe_mask = MEM_PROBE_LOW; int ret; if (!probe_mem || !(s->state & SOCKET_PRESENT)) return 0; if (s->features & SS_CAP_PAGE_REGS) probe_mask = MEM_PROBE_HIGH; ret = validate_mem(s, probe_mask); if (s_data->mem_db_valid.next != &s_data->mem_db_valid) return 0; return ret; } struct pcmcia_align_data { unsigned long mask; unsigned long offset; struct resource_map *map; }; static resource_size_t pcmcia_common_align(struct pcmcia_align_data *align_data, resource_size_t start) { resource_size_t ret; /* * Ensure that we have the correct start address */ ret = (start & ~align_data->mask) + align_data->offset; if (ret < start) ret += align_data->mask + 1; return ret; } static resource_size_t pcmcia_align(void *align_data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pcmcia_align_data *data = align_data; struct resource_map *m; resource_size_t start; start = pcmcia_common_align(data, res->start); for (m = data->map->next; m != data->map; m = m->next) { unsigned long map_start = m->base; unsigned long map_end = m->base + m->num - 1; /* * If the lower resources are not available, try aligning * to this entry of the resource database to see if it'll * fit here. */ if (start < map_start) start = pcmcia_common_align(data, map_start); /* * If we're above the area which was passed in, there's * no point proceeding. */ if (start >= res->end) break; if ((start + size - 1) <= map_end) break; } /* * If we failed to find something suitable, ensure we fail. */ if (m == data->map) start = res->end; return start; } /* * Adjust an existing IO region allocation, but making sure that we don't * encroach outside the resources which the user supplied. 
*/ static int __nonstatic_adjust_io_region(struct pcmcia_socket *s, unsigned long r_start, unsigned long r_end) { struct resource_map *m; struct socket_data *s_data = s->resource_data; int ret = -ENOMEM; for (m = s_data->io_db.next; m != &s_data->io_db; m = m->next) { unsigned long start = m->base; unsigned long end = m->base + m->num - 1; if (start > r_start || r_end > end) continue; ret = 0; } return ret; } /*====================================================================== These find ranges of I/O ports or memory addresses that are not currently allocated by other devices. The 'align' field should reflect the number of bits of address that need to be preserved from the initial value of *base. It should be a power of two, greater than or equal to 'num'. A value of 0 means that all bits of *base are significant. *base should also be strictly less than 'align'. ======================================================================*/ static struct resource *__nonstatic_find_io_region(struct pcmcia_socket *s, unsigned long base, int num, unsigned long align) { struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO, dev_name(&s->dev)); struct socket_data *s_data = s->resource_data; struct pcmcia_align_data data; unsigned long min = base; int ret; data.mask = align - 1; data.offset = base & data.mask; data.map = &s_data->io_db; #ifdef CONFIG_PCI if (s->cb_dev) { ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, min, 0, pcmcia_align, &data); } else #endif ret = allocate_resource(&ioport_resource, res, num, min, ~0UL, 1, pcmcia_align, &data); if (ret != 0) { kfree(res); res = NULL; } return res; } static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr, unsigned int *base, unsigned int num, unsigned int align, struct resource **parent) { int i, ret = 0; /* Check for an already-allocated window that must conflict with * what was asked for. It is a hack because it does not catch all * potential conflicts, just the most obvious ones. 
*/ for (i = 0; i < MAX_IO_WIN; i++) { if (!s->io[i].res) continue; if (!*base) continue; if ((s->io[i].res->start & (align-1)) == *base) return -EBUSY; } for (i = 0; i < MAX_IO_WIN; i++) { struct resource *res = s->io[i].res; unsigned int try; if (res && (res->flags & IORESOURCE_BITS) != (attr & IORESOURCE_BITS)) continue; if (!res) { if (align == 0) align = 0x10000; res = s->io[i].res = __nonstatic_find_io_region(s, *base, num, align); if (!res) return -EINVAL; *base = res->start; s->io[i].res->flags = ((res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS)); s->io[i].InUse = num; *parent = res; return 0; } /* Try to extend top of window */ try = res->end + 1; if ((*base == 0) || (*base == try)) { ret = __nonstatic_adjust_io_region(s, res->start, res->end + num); if (!ret) { ret = adjust_resource(s->io[i].res, res->start, res->end - res->start + num + 1); if (ret) continue; *base = try; s->io[i].InUse += num; *parent = res; return 0; } } /* Try to extend bottom of window */ try = res->start - num; if ((*base == 0) || (*base == try)) { ret = __nonstatic_adjust_io_region(s, res->start - num, res->end); if (!ret) { ret = adjust_resource(s->io[i].res, res->start - num, res->end - res->start + num + 1); if (ret) continue; *base = try; s->io[i].InUse += num; *parent = res; return 0; } } } return -EINVAL; } static struct resource *nonstatic_find_mem_region(u_long base, u_long num, u_long align, int low, struct pcmcia_socket *s) { struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev)); struct socket_data *s_data = s->resource_data; struct pcmcia_align_data data; unsigned long min, max; int ret, i, j; low = low || !(s->features & SS_CAP_PAGE_REGS); data.mask = align - 1; data.offset = base & data.mask; for (i = 0; i < 2; i++) { data.map = &s_data->mem_db_valid; if (low) { max = 0x100000UL; min = base < max ? 
base : 0; } else { max = ~0UL; min = 0x100000UL + base; } for (j = 0; j < 2; j++) { #ifdef CONFIG_PCI if (s->cb_dev) { ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, min, 0, pcmcia_align, &data); } else #endif { ret = allocate_resource(&iomem_resource, res, num, min, max, 1, pcmcia_align, &data); } if (ret == 0) break; data.map = &s_data->mem_db; } if (ret == 0 || low) break; low = 1; } if (ret != 0) { kfree(res); res = NULL; } return res; } static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) { struct socket_data *data = s->resource_data; unsigned long size = end - start + 1; int ret = 0; if (end < start) return -EINVAL; switch (action) { case ADD_MANAGED_RESOURCE: ret = add_interval(&data->mem_db, start, size); if (!ret) do_mem_probe(s, start, size, NULL, NULL); break; case REMOVE_MANAGED_RESOURCE: ret = sub_interval(&data->mem_db, start, size); break; default: ret = -EINVAL; } return ret; } static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) { struct socket_data *data = s->resource_data; unsigned long size; int ret = 0; #if defined(CONFIG_X86) /* on x86, avoid anything < 0x100 for it is often used for * legacy platform devices */ if (start < 0x100) start = 0x100; #endif size = end - start + 1; if (end < start) return -EINVAL; if (end > IO_SPACE_LIMIT) return -EINVAL; switch (action) { case ADD_MANAGED_RESOURCE: if (add_interval(&data->io_db, start, size) != 0) { ret = -EBUSY; break; } #ifdef CONFIG_PCMCIA_PROBE if (probe_io) do_io_probe(s, start, size); #endif break; case REMOVE_MANAGED_RESOURCE: sub_interval(&data->io_db, start, size); break; default: ret = -EINVAL; break; } return ret; } #ifdef CONFIG_PCI static int nonstatic_autoadd_resources(struct pcmcia_socket *s) { struct resource *res; int i, done = 0; if (!s->cb_dev || !s->cb_dev->bus) return -ENODEV; #if defined(CONFIG_X86) /* If this is the root bus, the risk of hitting some 
strange * system devices is too high: If a driver isn't loaded, the * resources are not claimed; even if a driver is loaded, it * may not request all resources or even the wrong one. We * can neither trust the rest of the kernel nor ACPI/PNP and * CRS parsing to get it right. Therefore, use several * safeguards: * * - Do not auto-add resources if the CardBus bridge is on * the PCI root bus * * - Avoid any I/O ports < 0x100. * * - On PCI-PCI bridges, only use resources which are set up * exclusively for the secondary PCI bus: the risk of hitting * system devices is quite low, as they usually aren't * connected to the secondary PCI bus. */ if (s->cb_dev->bus->number == 0) return -EINVAL; for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { res = s->cb_dev->bus->resource[i]; #else pci_bus_for_each_resource(s->cb_dev->bus, res, i) { #endif if (!res) continue; if (res->flags & IORESOURCE_IO) { /* safeguard against the root resource, where the * risk of hitting any other device would be too * high */ if (res == &ioport_resource) continue; dev_printk(KERN_INFO, &s->cb_dev->dev, "pcmcia: parent PCI bridge window: %pR\n", res); if (!adjust_io(s, ADD_MANAGED_RESOURCE, res->start, res->end)) done |= IORESOURCE_IO; } if (res->flags & IORESOURCE_MEM) { /* safeguard against the root resource, where the * risk of hitting any other device would be too * high */ if (res == &iomem_resource) continue; dev_printk(KERN_INFO, &s->cb_dev->dev, "pcmcia: parent PCI bridge window: %pR\n", res); if (!adjust_memory(s, ADD_MANAGED_RESOURCE, res->start, res->end)) done |= IORESOURCE_MEM; } } /* if we got at least one of IO, and one of MEM, we can be glad and * activate the PCMCIA subsystem */ if (done == (IORESOURCE_MEM | IORESOURCE_IO)) s->resource_setup_done = 1; return 0; } #else static inline int nonstatic_autoadd_resources(struct pcmcia_socket *s) { return -ENODEV; } #endif static int nonstatic_init(struct pcmcia_socket *s) { struct socket_data *data; data = kzalloc(sizeof(struct socket_data), 
GFP_KERNEL); if (!data) return -ENOMEM; data->mem_db.next = &data->mem_db; data->mem_db_valid.next = &data->mem_db_valid; data->io_db.next = &data->io_db; s->resource_data = (void *) data; nonstatic_autoadd_resources(s); return 0; } static void nonstatic_release_resource_db(struct pcmcia_socket *s) { struct socket_data *data = s->resource_data; struct resource_map *p, *q; for (p = data->mem_db_valid.next; p != &data->mem_db_valid; p = q) { q = p->next; kfree(p); } for (p = data->mem_db.next; p != &data->mem_db; p = q) { q = p->next; kfree(p); } for (p = data->io_db.next; p != &data->io_db; p = q) { q = p->next; kfree(p); } } struct pccard_resource_ops pccard_nonstatic_ops = { .validate_mem = pcmcia_nonstatic_validate_mem, .find_io = nonstatic_find_io, .find_mem = nonstatic_find_mem_region, .init = nonstatic_init, .exit = nonstatic_release_resource_db, }; EXPORT_SYMBOL(pccard_nonstatic_ops); /* sysfs interface to the resource database */ static ssize_t show_io_db(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_socket *s = dev_get_drvdata(dev); struct socket_data *data; struct resource_map *p; ssize_t ret = 0; mutex_lock(&s->ops_mutex); data = s->resource_data; for (p = data->io_db.next; p != &data->io_db; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); } mutex_unlock(&s->ops_mutex); return ret; } static ssize_t store_io_db(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcmcia_socket *s = dev_get_drvdata(dev); unsigned long start_addr, end_addr; unsigned int add = ADD_MANAGED_RESOURCE; ssize_t ret = 0; ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr); if (ret != 2) { ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr); add = REMOVE_MANAGED_RESOURCE; if (ret != 2) { ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr, &end_addr); add = 
ADD_MANAGED_RESOURCE; if (ret != 2) return -EINVAL; } } if (end_addr < start_addr) return -EINVAL; mutex_lock(&s->ops_mutex); ret = adjust_io(s, add, start_addr, end_addr); mutex_unlock(&s->ops_mutex); return ret ? ret : count; } static DEVICE_ATTR(available_resources_io, 0600, show_io_db, store_io_db); static ssize_t show_mem_db(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_socket *s = dev_get_drvdata(dev); struct socket_data *data; struct resource_map *p; ssize_t ret = 0; mutex_lock(&s->ops_mutex); data = s->resource_data; for (p = data->mem_db_valid.next; p != &data->mem_db_valid; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); } for (p = data->mem_db.next; p != &data->mem_db; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); } mutex_unlock(&s->ops_mutex); return ret; } static ssize_t store_mem_db(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcmcia_socket *s = dev_get_drvdata(dev); unsigned long start_addr, end_addr; unsigned int add = ADD_MANAGED_RESOURCE; ssize_t ret = 0; ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr); if (ret != 2) { ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr); add = REMOVE_MANAGED_RESOURCE; if (ret != 2) { ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr, &end_addr); add = ADD_MANAGED_RESOURCE; if (ret != 2) return -EINVAL; } } if (end_addr < start_addr) return -EINVAL; mutex_lock(&s->ops_mutex); ret = adjust_memory(s, add, start_addr, end_addr); mutex_unlock(&s->ops_mutex); return ret ? 
ret : count; } static DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db); static struct attribute *pccard_rsrc_attributes[] = { &dev_attr_available_resources_io.attr, &dev_attr_available_resources_mem.attr, NULL, }; static const struct attribute_group rsrc_attributes = { .attrs = pccard_rsrc_attributes, }; static int __devinit pccard_sysfs_add_rsrc(struct device *dev, struct class_interface *class_intf) { struct pcmcia_socket *s = dev_get_drvdata(dev); if (s->resource_ops != &pccard_nonstatic_ops) return 0; return sysfs_create_group(&dev->kobj, &rsrc_attributes); } static void __devexit pccard_sysfs_remove_rsrc(struct device *dev, struct class_interface *class_intf) { struct pcmcia_socket *s = dev_get_drvdata(dev); if (s->resource_ops != &pccard_nonstatic_ops) return; sysfs_remove_group(&dev->kobj, &rsrc_attributes); } static struct class_interface pccard_rsrc_interface __refdata = { .class = &pcmcia_socket_class, .add_dev = &pccard_sysfs_add_rsrc, .remove_dev = __devexit_p(&pccard_sysfs_remove_rsrc), }; static int __init nonstatic_sysfs_init(void) { return class_interface_register(&pccard_rsrc_interface); } static void __exit nonstatic_sysfs_exit(void) { class_interface_unregister(&pccard_rsrc_interface); } module_init(nonstatic_sysfs_init); module_exit(nonstatic_sysfs_exit);
gpl-2.0
hiikezoe/android_kernel_fujitsu_f12nad
drivers/staging/panel/panel.c
2690
61486
/* * Front panel driver for Linux * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * This code drives an LCD module (/dev/lcd), and a keypad (/dev/keypad) * connected to a parallel printer port. * * The LCD module may either be an HD44780-like 8-bit parallel LCD, or a 1-bit * serial module compatible with Samsung's KS0074. The pins may be connected in * any combination, everything is programmable. * * The keypad consists in a matrix of push buttons connecting input pins to * data output pins or to the ground. The combinations have to be hard-coded * in the driver, though several profiles exist and adding new ones is easy. * * Several profiles are provided for commonly found LCD+keypad modules on the * market, such as those found in Nexcom's appliances. * * FIXME: * - the initialization/deinitialization process is very dirty and should * be rewritten. It may even be buggy. 
* * TODO: * - document 24 keys keyboard (3 rows of 8 cols, 32 diodes + 2 inputs) * - make the LCD a part of a virtual screen of Vx*Vy * - make the inputs list smp-safe * - change the keyboard to a double mapping : signals -> key_id -> values * so that applications can change values without knowing signals * */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/parport.h> #include <linux/version.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <generated/utsrelease.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/system.h> #define LCD_MINOR 156 #define KEYPAD_MINOR 185 #define PANEL_VERSION "0.9.5" #define LCD_MAXBYTES 256 /* max burst write */ #define KEYPAD_BUFFER 64 /* poll the keyboard this every second */ #define INPUT_POLL_TIME (HZ/50) /* a key starts to repeat after this times INPUT_POLL_TIME */ #define KEYPAD_REP_START (10) /* a key repeats this times INPUT_POLL_TIME */ #define KEYPAD_REP_DELAY (2) /* keep the light on this times INPUT_POLL_TIME for each flash */ #define FLASH_LIGHT_TEMPO (200) /* converts an r_str() input to an active high, bits string : 000BAOSE */ #define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3) #define PNL_PBUSY 0x80 /* inverted input, active low */ #define PNL_PACK 0x40 /* direct input, active low */ #define PNL_POUTPA 0x20 /* direct input, active high */ #define PNL_PSELECD 0x10 /* direct input, active high */ #define PNL_PERRORP 0x08 /* direct input, active low */ #define PNL_PBIDIR 0x20 /* bi-directional ports */ /* high to read data in or-ed with data out */ #define PNL_PINTEN 0x10 #define PNL_PSELECP 0x08 /* inverted 
output, active low */ #define PNL_PINITP 0x04 /* direct output, active low */ #define PNL_PAUTOLF 0x02 /* inverted output, active low */ #define PNL_PSTROBE 0x01 /* inverted output */ #define PNL_PD0 0x01 #define PNL_PD1 0x02 #define PNL_PD2 0x04 #define PNL_PD3 0x08 #define PNL_PD4 0x10 #define PNL_PD5 0x20 #define PNL_PD6 0x40 #define PNL_PD7 0x80 #define PIN_NONE 0 #define PIN_STROBE 1 #define PIN_D0 2 #define PIN_D1 3 #define PIN_D2 4 #define PIN_D3 5 #define PIN_D4 6 #define PIN_D5 7 #define PIN_D6 8 #define PIN_D7 9 #define PIN_AUTOLF 14 #define PIN_INITP 16 #define PIN_SELECP 17 #define PIN_NOT_SET 127 #define LCD_FLAG_S 0x0001 #define LCD_FLAG_ID 0x0002 #define LCD_FLAG_B 0x0004 /* blink on */ #define LCD_FLAG_C 0x0008 /* cursor on */ #define LCD_FLAG_D 0x0010 /* display on */ #define LCD_FLAG_F 0x0020 /* large font mode */ #define LCD_FLAG_N 0x0040 /* 2-rows mode */ #define LCD_FLAG_L 0x0080 /* backlight enabled */ #define LCD_ESCAPE_LEN 24 /* max chars for LCD escape command */ #define LCD_ESCAPE_CHAR 27 /* use char 27 for escape command */ /* macros to simplify use of the parallel port */ #define r_ctr(x) (parport_read_control((x)->port)) #define r_dtr(x) (parport_read_data((x)->port)) #define r_str(x) (parport_read_status((x)->port)) #define w_ctr(x, y) do { parport_write_control((x)->port, (y)); } while (0) #define w_dtr(x, y) do { parport_write_data((x)->port, (y)); } while (0) /* this defines which bits are to be used and which ones to be ignored */ /* logical or of the output bits involved in the scan matrix */ static __u8 scan_mask_o; /* logical or of the input bits involved in the scan matrix */ static __u8 scan_mask_i; typedef __u64 pmask_t; enum input_type { INPUT_TYPE_STD, INPUT_TYPE_KBD, }; enum input_state { INPUT_ST_LOW, INPUT_ST_RISING, INPUT_ST_HIGH, INPUT_ST_FALLING, }; struct logical_input { struct list_head list; pmask_t mask; pmask_t value; enum input_type type; enum input_state state; __u8 rise_time, fall_time; __u8 rise_timer, 
fall_timer, high_timer; union { struct { /* valid when type == INPUT_TYPE_STD */ void (*press_fct) (int); void (*release_fct) (int); int press_data; int release_data; } std; struct { /* valid when type == INPUT_TYPE_KBD */ /* strings can be non null-terminated */ char press_str[sizeof(void *) + sizeof(int)]; char repeat_str[sizeof(void *) + sizeof(int)]; char release_str[sizeof(void *) + sizeof(int)]; } kbd; } u; }; LIST_HEAD(logical_inputs); /* list of all defined logical inputs */ /* physical contacts history * Physical contacts are a 45 bits string of 9 groups of 5 bits each. * The 8 lower groups correspond to output bits 0 to 7, and the 9th group * corresponds to the ground. * Within each group, bits are stored in the same order as read on the port : * BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0). * So, each __u64 (or pmask_t) is represented like this : * 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE * <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00> */ /* what has just been read from the I/O ports */ static pmask_t phys_read; /* previous phys_read */ static pmask_t phys_read_prev; /* stabilized phys_read (phys_read|phys_read_prev) */ static pmask_t phys_curr; /* previous phys_curr */ static pmask_t phys_prev; /* 0 means that at least one logical signal needs be computed */ static char inputs_stable; /* these variables are specific to the keypad */ static char keypad_buffer[KEYPAD_BUFFER]; static int keypad_buflen; static int keypad_start; static char keypressed; static wait_queue_head_t keypad_read_wait; /* lcd-specific variables */ /* contains the LCD config state */ static unsigned long int lcd_flags; /* contains the LCD X offset */ static unsigned long int lcd_addr_x; /* contains the LCD Y offset */ static unsigned long int lcd_addr_y; /* current escape sequence, 0 terminated */ static char lcd_escape[LCD_ESCAPE_LEN + 1]; /* not in escape state. 
>=0 = escape cmd len */ static int lcd_escape_len = -1; /* * Bit masks to convert LCD signals to parallel port outputs. * _d_ are values for data port, _c_ are for control port. * [0] = signal OFF, [1] = signal ON, [2] = mask */ #define BIT_CLR 0 #define BIT_SET 1 #define BIT_MSK 2 #define BIT_STATES 3 /* * one entry for each bit on the LCD */ #define LCD_BIT_E 0 #define LCD_BIT_RS 1 #define LCD_BIT_RW 2 #define LCD_BIT_BL 3 #define LCD_BIT_CL 4 #define LCD_BIT_DA 5 #define LCD_BITS 6 /* * each bit can be either connected to a DATA or CTRL port */ #define LCD_PORT_C 0 #define LCD_PORT_D 1 #define LCD_PORTS 2 static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES]; /* * LCD protocols */ #define LCD_PROTO_PARALLEL 0 #define LCD_PROTO_SERIAL 1 #define LCD_PROTO_TI_DA8XX_LCD 2 /* * LCD character sets */ #define LCD_CHARSET_NORMAL 0 #define LCD_CHARSET_KS0074 1 /* * LCD types */ #define LCD_TYPE_NONE 0 #define LCD_TYPE_OLD 1 #define LCD_TYPE_KS0074 2 #define LCD_TYPE_HANTRONIX 3 #define LCD_TYPE_NEXCOM 4 #define LCD_TYPE_CUSTOM 5 /* * keypad types */ #define KEYPAD_TYPE_NONE 0 #define KEYPAD_TYPE_OLD 1 #define KEYPAD_TYPE_NEW 2 #define KEYPAD_TYPE_NEXCOM 3 /* * panel profiles */ #define PANEL_PROFILE_CUSTOM 0 #define PANEL_PROFILE_OLD 1 #define PANEL_PROFILE_NEW 2 #define PANEL_PROFILE_HANTRONIX 3 #define PANEL_PROFILE_NEXCOM 4 #define PANEL_PROFILE_LARGE 5 /* * Construct custom config from the kernel's configuration */ #define DEFAULT_PROFILE PANEL_PROFILE_LARGE #define DEFAULT_PARPORT 0 #define DEFAULT_LCD LCD_TYPE_OLD #define DEFAULT_KEYPAD KEYPAD_TYPE_OLD #define DEFAULT_LCD_WIDTH 40 #define DEFAULT_LCD_BWIDTH 40 #define DEFAULT_LCD_HWIDTH 64 #define DEFAULT_LCD_HEIGHT 2 #define DEFAULT_LCD_PROTO LCD_PROTO_PARALLEL #define DEFAULT_LCD_PIN_E PIN_AUTOLF #define DEFAULT_LCD_PIN_RS PIN_SELECP #define DEFAULT_LCD_PIN_RW PIN_INITP #define DEFAULT_LCD_PIN_SCL PIN_STROBE #define DEFAULT_LCD_PIN_SDA PIN_D0 #define DEFAULT_LCD_PIN_BL PIN_NOT_SET #define 
DEFAULT_LCD_CHARSET LCD_CHARSET_NORMAL #ifdef CONFIG_PANEL_PROFILE #undef DEFAULT_PROFILE #define DEFAULT_PROFILE CONFIG_PANEL_PROFILE #endif #ifdef CONFIG_PANEL_PARPORT #undef DEFAULT_PARPORT #define DEFAULT_PARPORT CONFIG_PANEL_PARPORT #endif #if DEFAULT_PROFILE == 0 /* custom */ #ifdef CONFIG_PANEL_KEYPAD #undef DEFAULT_KEYPAD #define DEFAULT_KEYPAD CONFIG_PANEL_KEYPAD #endif #ifdef CONFIG_PANEL_LCD #undef DEFAULT_LCD #define DEFAULT_LCD CONFIG_PANEL_LCD #endif #ifdef CONFIG_PANEL_LCD_WIDTH #undef DEFAULT_LCD_WIDTH #define DEFAULT_LCD_WIDTH CONFIG_PANEL_LCD_WIDTH #endif #ifdef CONFIG_PANEL_LCD_BWIDTH #undef DEFAULT_LCD_BWIDTH #define DEFAULT_LCD_BWIDTH CONFIG_PANEL_LCD_BWIDTH #endif #ifdef CONFIG_PANEL_LCD_HWIDTH #undef DEFAULT_LCD_HWIDTH #define DEFAULT_LCD_HWIDTH CONFIG_PANEL_LCD_HWIDTH #endif #ifdef CONFIG_PANEL_LCD_HEIGHT #undef DEFAULT_LCD_HEIGHT #define DEFAULT_LCD_HEIGHT CONFIG_PANEL_LCD_HEIGHT #endif #ifdef CONFIG_PANEL_LCD_PROTO #undef DEFAULT_LCD_PROTO #define DEFAULT_LCD_PROTO CONFIG_PANEL_LCD_PROTO #endif #ifdef CONFIG_PANEL_LCD_PIN_E #undef DEFAULT_LCD_PIN_E #define DEFAULT_LCD_PIN_E CONFIG_PANEL_LCD_PIN_E #endif #ifdef CONFIG_PANEL_LCD_PIN_RS #undef DEFAULT_LCD_PIN_RS #define DEFAULT_LCD_PIN_RS CONFIG_PANEL_LCD_PIN_RS #endif #ifdef CONFIG_PANEL_LCD_PIN_RW #undef DEFAULT_LCD_PIN_RW #define DEFAULT_LCD_PIN_RW CONFIG_PANEL_LCD_PIN_RW #endif #ifdef CONFIG_PANEL_LCD_PIN_SCL #undef DEFAULT_LCD_PIN_SCL #define DEFAULT_LCD_PIN_SCL CONFIG_PANEL_LCD_PIN_SCL #endif #ifdef CONFIG_PANEL_LCD_PIN_SDA #undef DEFAULT_LCD_PIN_SDA #define DEFAULT_LCD_PIN_SDA CONFIG_PANEL_LCD_PIN_SDA #endif #ifdef CONFIG_PANEL_LCD_PIN_BL #undef DEFAULT_LCD_PIN_BL #define DEFAULT_LCD_PIN_BL CONFIG_PANEL_LCD_PIN_BL #endif #ifdef CONFIG_PANEL_LCD_CHARSET #undef DEFAULT_LCD_CHARSET #define DEFAULT_LCD_CHARSET CONFIG_PANEL_LCD_CHARSET #endif #endif /* DEFAULT_PROFILE == 0 */ /* global variables */ static int keypad_open_cnt; /* #times opened */ static int lcd_open_cnt; /* #times opened */ 
static struct pardevice *pprt; static int lcd_initialized; static int keypad_initialized; static int light_tempo; static char lcd_must_clear; static char lcd_left_shift; static char init_in_progress; static void (*lcd_write_cmd) (int); static void (*lcd_write_data) (int); static void (*lcd_clear_fast) (void); static DEFINE_SPINLOCK(pprt_lock); static struct timer_list scan_timer; MODULE_DESCRIPTION("Generic parallel port LCD/Keypad driver"); static int parport = -1; module_param(parport, int, 0000); MODULE_PARM_DESC(parport, "Parallel port index (0=lpt1, 1=lpt2, ...)"); static int lcd_height = -1; module_param(lcd_height, int, 0000); MODULE_PARM_DESC(lcd_height, "Number of lines on the LCD"); static int lcd_width = -1; module_param(lcd_width, int, 0000); MODULE_PARM_DESC(lcd_width, "Number of columns on the LCD"); static int lcd_bwidth = -1; /* internal buffer width (usually 40) */ module_param(lcd_bwidth, int, 0000); MODULE_PARM_DESC(lcd_bwidth, "Internal LCD line width (40)"); static int lcd_hwidth = -1; /* hardware buffer width (usually 64) */ module_param(lcd_hwidth, int, 0000); MODULE_PARM_DESC(lcd_hwidth, "LCD line hardware address (64)"); static int lcd_enabled = -1; module_param(lcd_enabled, int, 0000); MODULE_PARM_DESC(lcd_enabled, "Deprecated option, use lcd_type instead"); static int keypad_enabled = -1; module_param(keypad_enabled, int, 0000); MODULE_PARM_DESC(keypad_enabled, "Deprecated option, use keypad_type instead"); static int lcd_type = -1; module_param(lcd_type, int, 0000); MODULE_PARM_DESC(lcd_type, "LCD type: 0=none, 1=old //, 2=serial ks0074, " "3=hantronix //, 4=nexcom //, 5=compiled-in"); static int lcd_proto = -1; module_param(lcd_proto, int, 0000); MODULE_PARM_DESC(lcd_proto, "LCD communication: 0=parallel (//), 1=serial," "2=TI LCD Interface"); static int lcd_charset = -1; module_param(lcd_charset, int, 0000); MODULE_PARM_DESC(lcd_charset, "LCD character set: 0=standard, 1=KS0074"); static int keypad_type = -1; module_param(keypad_type, 
int, 0000); MODULE_PARM_DESC(keypad_type, "Keypad type: 0=none, 1=old 6 keys, 2=new 6+1 keys, " "3=nexcom 4 keys"); static int profile = DEFAULT_PROFILE; module_param(profile, int, 0000); MODULE_PARM_DESC(profile, "1=16x2 old kp; 2=serial 16x2, new kp; 3=16x2 hantronix; " "4=16x2 nexcom; default=40x2, old kp"); /* * These are the parallel port pins the LCD control signals are connected to. * Set this to 0 if the signal is not used. Set it to its opposite value * (negative) if the signal is negated. -MAXINT is used to indicate that the * pin has not been explicitly specified. * * WARNING! no check will be performed about collisions with keypad ! */ static int lcd_e_pin = PIN_NOT_SET; module_param(lcd_e_pin, int, 0000); MODULE_PARM_DESC(lcd_e_pin, "# of the // port pin connected to LCD 'E' signal, " "with polarity (-17..17)"); static int lcd_rs_pin = PIN_NOT_SET; module_param(lcd_rs_pin, int, 0000); MODULE_PARM_DESC(lcd_rs_pin, "# of the // port pin connected to LCD 'RS' signal, " "with polarity (-17..17)"); static int lcd_rw_pin = PIN_NOT_SET; module_param(lcd_rw_pin, int, 0000); MODULE_PARM_DESC(lcd_rw_pin, "# of the // port pin connected to LCD 'RW' signal, " "with polarity (-17..17)"); static int lcd_bl_pin = PIN_NOT_SET; module_param(lcd_bl_pin, int, 0000); MODULE_PARM_DESC(lcd_bl_pin, "# of the // port pin connected to LCD backlight, " "with polarity (-17..17)"); static int lcd_da_pin = PIN_NOT_SET; module_param(lcd_da_pin, int, 0000); MODULE_PARM_DESC(lcd_da_pin, "# of the // port pin connected to serial LCD 'SDA' " "signal, with polarity (-17..17)"); static int lcd_cl_pin = PIN_NOT_SET; module_param(lcd_cl_pin, int, 0000); MODULE_PARM_DESC(lcd_cl_pin, "# of the // port pin connected to serial LCD 'SCL' " "signal, with polarity (-17..17)"); static unsigned char *lcd_char_conv; /* for some LCD drivers (ks0074) we need a charset conversion table. 
*/ static unsigned char lcd_char_conv_ks0074[256] = { /* 0|8 1|9 2|A 3|B 4|C 5|D 6|E 7|F */ /* 0x00 */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x08 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x10 */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x18 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x20 */ 0x20, 0x21, 0x22, 0x23, 0xa2, 0x25, 0x26, 0x27, /* 0x28 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x30 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x38 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x40 */ 0xa0, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x48 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x50 */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x58 */ 0x58, 0x59, 0x5a, 0xfa, 0xfb, 0xfc, 0x1d, 0xc4, /* 0x60 */ 0x96, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x68 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x70 */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x78 */ 0x78, 0x79, 0x7a, 0xfd, 0xfe, 0xff, 0xce, 0x20, /* 0x80 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x88 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x90 */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x98 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0xA0 */ 0x20, 0x40, 0xb1, 0xa1, 0x24, 0xa3, 0xfe, 0x5f, /* 0xA8 */ 0x22, 0xc8, 0x61, 0x14, 0x97, 0x2d, 0xad, 0x96, /* 0xB0 */ 0x80, 0x8c, 0x82, 0x83, 0x27, 0x8f, 0x86, 0xdd, /* 0xB8 */ 0x2c, 0x81, 0x6f, 0x15, 0x8b, 0x8a, 0x84, 0x60, /* 0xC0 */ 0xe2, 0xe2, 0xe2, 0x5b, 0x5b, 0xae, 0xbc, 0xa9, /* 0xC8 */ 0xc5, 0xbf, 0xc6, 0xf1, 0xe3, 0xe3, 0xe3, 0xe3, /* 0xD0 */ 0x44, 0x5d, 0xa8, 0xe4, 0xec, 0xec, 0x5c, 0x78, /* 0xD8 */ 0xab, 0xa6, 0xe5, 0x5e, 0x5e, 0xe6, 0xaa, 0xbe, /* 0xE0 */ 0x7f, 0xe7, 0xaf, 0x7b, 0x7b, 0xaf, 0xbd, 0xc8, /* 0xE8 */ 0xa4, 0xa5, 0xc7, 0xf6, 0xa7, 0xe8, 0x69, 0x69, /* 0xF0 */ 0xed, 0x7d, 0xa8, 0xe4, 0xec, 0x5c, 0x5c, 0x25, /* 0xF8 */ 0xac, 0xa6, 0xea, 0xef, 0x7e, 0xeb, 0xb2, 0x79, }; char 
old_keypad_profile[][4][9] = { {"S0", "Left\n", "Left\n", ""}, {"S1", "Down\n", "Down\n", ""}, {"S2", "Up\n", "Up\n", ""}, {"S3", "Right\n", "Right\n", ""}, {"S4", "Esc\n", "Esc\n", ""}, {"S5", "Ret\n", "Ret\n", ""}, {"", "", "", ""} }; /* signals, press, repeat, release */ char new_keypad_profile[][4][9] = { {"S0", "Left\n", "Left\n", ""}, {"S1", "Down\n", "Down\n", ""}, {"S2", "Up\n", "Up\n", ""}, {"S3", "Right\n", "Right\n", ""}, {"S4s5", "", "Esc\n", "Esc\n"}, {"s4S5", "", "Ret\n", "Ret\n"}, {"S4S5", "Help\n", "", ""}, /* add new signals above this line */ {"", "", "", ""} }; /* signals, press, repeat, release */ char nexcom_keypad_profile[][4][9] = { {"a-p-e-", "Down\n", "Down\n", ""}, {"a-p-E-", "Ret\n", "Ret\n", ""}, {"a-P-E-", "Esc\n", "Esc\n", ""}, {"a-P-e-", "Up\n", "Up\n", ""}, /* add new signals above this line */ {"", "", "", ""} }; static char (*keypad_profile)[4][9] = old_keypad_profile; /* FIXME: this should be converted to a bit array containing signals states */ static struct { unsigned char e; /* parallel LCD E (data latch on falling edge) */ unsigned char rs; /* parallel LCD RS (0 = cmd, 1 = data) */ unsigned char rw; /* parallel LCD R/W (0 = W, 1 = R) */ unsigned char bl; /* parallel LCD backlight (0 = off, 1 = on) */ unsigned char cl; /* serial LCD clock (latch on rising edge) */ unsigned char da; /* serial LCD data */ } bits; static void init_scan_timer(void); /* sets data port bits according to current signals values */ static int set_data_bits(void) { int val, bit; val = r_dtr(pprt); for (bit = 0; bit < LCD_BITS; bit++) val &= lcd_bits[LCD_PORT_D][bit][BIT_MSK]; val |= lcd_bits[LCD_PORT_D][LCD_BIT_E][bits.e] | lcd_bits[LCD_PORT_D][LCD_BIT_RS][bits.rs] | lcd_bits[LCD_PORT_D][LCD_BIT_RW][bits.rw] | lcd_bits[LCD_PORT_D][LCD_BIT_BL][bits.bl] | lcd_bits[LCD_PORT_D][LCD_BIT_CL][bits.cl] | lcd_bits[LCD_PORT_D][LCD_BIT_DA][bits.da]; w_dtr(pprt, val); return val; } /* sets ctrl port bits according to current signals values */ static int 
set_ctrl_bits(void) { int val, bit; val = r_ctr(pprt); for (bit = 0; bit < LCD_BITS; bit++) val &= lcd_bits[LCD_PORT_C][bit][BIT_MSK]; val |= lcd_bits[LCD_PORT_C][LCD_BIT_E][bits.e] | lcd_bits[LCD_PORT_C][LCD_BIT_RS][bits.rs] | lcd_bits[LCD_PORT_C][LCD_BIT_RW][bits.rw] | lcd_bits[LCD_PORT_C][LCD_BIT_BL][bits.bl] | lcd_bits[LCD_PORT_C][LCD_BIT_CL][bits.cl] | lcd_bits[LCD_PORT_C][LCD_BIT_DA][bits.da]; w_ctr(pprt, val); return val; } /* sets ctrl & data port bits according to current signals values */ static void panel_set_bits(void) { set_data_bits(); set_ctrl_bits(); } /* * Converts a parallel port pin (from -25 to 25) to data and control ports * masks, and data and control port bits. The signal will be considered * unconnected if it's on pin 0 or an invalid pin (<-25 or >25). * * Result will be used this way : * out(dport, in(dport) & d_val[2] | d_val[signal_state]) * out(cport, in(cport) & c_val[2] | c_val[signal_state]) */ void pin_to_bits(int pin, unsigned char *d_val, unsigned char *c_val) { int d_bit, c_bit, inv; d_val[0] = c_val[0] = d_val[1] = c_val[1] = 0; d_val[2] = c_val[2] = 0xFF; if (pin == 0) return; inv = (pin < 0); if (inv) pin = -pin; d_bit = c_bit = 0; switch (pin) { case PIN_STROBE: /* strobe, inverted */ c_bit = PNL_PSTROBE; inv = !inv; break; case PIN_D0...PIN_D7: /* D0 - D7 = 2 - 9 */ d_bit = 1 << (pin - 2); break; case PIN_AUTOLF: /* autofeed, inverted */ c_bit = PNL_PAUTOLF; inv = !inv; break; case PIN_INITP: /* init, direct */ c_bit = PNL_PINITP; break; case PIN_SELECP: /* select_in, inverted */ c_bit = PNL_PSELECP; inv = !inv; break; default: /* unknown pin, ignore */ break; } if (c_bit) { c_val[2] &= ~c_bit; c_val[!inv] = c_bit; } else if (d_bit) { d_val[2] &= ~d_bit; d_val[!inv] = d_bit; } } /* sleeps that many milliseconds with a reschedule */ static void long_sleep(int ms) { if (in_interrupt()) mdelay(ms); else { current->state = TASK_INTERRUPTIBLE; schedule_timeout((ms * HZ + 999) / 1000); } } /* send a serial byte to the LCD panel. 
   The caller is responsible for locking if needed. */
static void lcd_send_serial(int byte)
{
	int bit;

	/* the data bit is set on D0, and the clock on STROBE.
	 * LCD reads D0 on STROBE's rising edge.
	 */
	for (bit = 0; bit < 8; bit++) {
		bits.cl = BIT_CLR;	/* CLK low */
		panel_set_bits();
		bits.da = byte & 1;	/* shift LSB first */
		panel_set_bits();
		udelay(2);	/* maintain the data during 2 us before CLK up */
		bits.cl = BIT_SET;	/* CLK high */
		panel_set_bits();
		udelay(1);	/* maintain the strobe during 1 us */
		byte >>= 1;
	}
}

/* turn the backlight on or off; no-op when no backlight pin is wired */
static void lcd_backlight(int on)
{
	if (lcd_bl_pin == PIN_NONE)
		return;

	/* The backlight is activated by setting the AUTOFEED line to +5V */
	spin_lock(&pprt_lock);
	bits.bl = on;
	panel_set_bits();
	spin_unlock(&pprt_lock);
}

/* send a command to the LCD panel in serial mode (nibbles, low first) */
static void lcd_write_cmd_s(int cmd)
{
	spin_lock(&pprt_lock);
	lcd_send_serial(0x1F);	/* R/W=W, RS=0 */
	lcd_send_serial(cmd & 0x0F);
	lcd_send_serial((cmd >> 4) & 0x0F);
	udelay(40);		/* the shortest command takes at least 40 us */
	spin_unlock(&pprt_lock);
}

/* send data to the LCD panel in serial mode (nibbles, low first) */
static void lcd_write_data_s(int data)
{
	spin_lock(&pprt_lock);
	lcd_send_serial(0x5F);	/* R/W=W, RS=1 */
	lcd_send_serial(data & 0x0F);
	lcd_send_serial((data >> 4) & 0x0F);
	udelay(40);		/* the shortest data takes at least 40 us */
	spin_unlock(&pprt_lock);
}

/* send a command to the LCD panel in 8 bits parallel mode */
static void lcd_write_cmd_p8(int cmd)
{
	spin_lock(&pprt_lock);
	/* present the data to the data port */
	w_dtr(pprt, cmd);
	udelay(20);	/* maintain the data during 20 us before the strobe */

	bits.e = BIT_SET;
	bits.rs = BIT_CLR;
	bits.rw = BIT_CLR;
	set_ctrl_bits();

	udelay(40);	/* maintain the strobe during 40 us */

	bits.e = BIT_CLR;
	set_ctrl_bits();

	udelay(120);	/* the shortest command takes at least 120 us */
	spin_unlock(&pprt_lock);
}

/* send data to the LCD panel in 8 bits parallel mode */
static void lcd_write_data_p8(int data)
{
	spin_lock(&pprt_lock);
	/* present the data to the data port */
	w_dtr(pprt, data);
	udelay(20);	/* maintain the data during 20 us before the strobe */

	bits.e = BIT_SET;
	bits.rs = BIT_SET;
	bits.rw = BIT_CLR;
	set_ctrl_bits();

	udelay(40);	/* maintain the strobe during 40 us */

	bits.e = BIT_CLR;
	set_ctrl_bits();

	udelay(45);	/* the shortest data takes at least 45 us */
	spin_unlock(&pprt_lock);
}

/* send a command to the TI LCD panel (command byte on the control port) */
static void lcd_write_cmd_tilcd(int cmd)
{
	spin_lock(&pprt_lock);
	/* present the data to the control port */
	w_ctr(pprt, cmd);
	udelay(60);
	spin_unlock(&pprt_lock);
}

/* send data to the TI LCD panel (data byte on the data port) */
static void lcd_write_data_tilcd(int data)
{
	spin_lock(&pprt_lock);
	/* present the data to the data port */
	w_dtr(pprt, data);
	udelay(60);
	spin_unlock(&pprt_lock);
}

/* move the HD44780 cursor to the (lcd_addr_x, lcd_addr_y) position */
static void lcd_gotoxy(void)
{
	lcd_write_cmd(0x80	/* set DDRAM address */
		      | (lcd_addr_y ? lcd_hwidth : 0)
		      /* we force the cursor to stay at the end of the
		         line if it wants to go farther */
		      | ((lcd_addr_x < lcd_bwidth) ? lcd_addr_x &
			 (lcd_hwidth - 1) : lcd_bwidth - 1));
}

/* print one char at the cursor, applying the charset conversion if any */
static void lcd_print(char c)
{
	if (lcd_addr_x < lcd_bwidth) {
		if (lcd_char_conv != NULL)
			c = lcd_char_conv[(unsigned char)c];
		lcd_write_data(c);
		lcd_addr_x++;
	}
	/* prevents the cursor from wrapping onto the next line */
	if (lcd_addr_x == lcd_bwidth)
		lcd_gotoxy();
}

/* fills the display with spaces and resets X/Y (serial protocol) */
static void lcd_clear_fast_s(void)
{
	int pos;
	lcd_addr_x = lcd_addr_y = 0;
	lcd_gotoxy();

	spin_lock(&pprt_lock);
	for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
		lcd_send_serial(0x5F);	/* R/W=W, RS=1 */
		lcd_send_serial(' ' & 0x0F);
		lcd_send_serial((' ' >> 4) & 0x0F);
		udelay(40);	/* the shortest data takes at least 40 us */
	}
	spin_unlock(&pprt_lock);

	lcd_addr_x = lcd_addr_y = 0;
	lcd_gotoxy();
}

/* fills the display with spaces and resets X/Y (8-bit parallel protocol) */
static void lcd_clear_fast_p8(void)
{
	int pos;
	lcd_addr_x = lcd_addr_y = 0;
	lcd_gotoxy();

	spin_lock(&pprt_lock);
	for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
		/* present the data to the data port */
		w_dtr(pprt, ' ');

		/* maintain the data during 20 us before the strobe */
		udelay(20);

		bits.e = BIT_SET;
		bits.rs = BIT_SET;
		bits.rw = BIT_CLR;
		set_ctrl_bits();

		/* maintain the strobe during 40 us */
		udelay(40);

		bits.e = BIT_CLR;
		set_ctrl_bits();

		/* the shortest data takes at least 45 us */
		udelay(45);
	}
	spin_unlock(&pprt_lock);

	lcd_addr_x = lcd_addr_y = 0;
	lcd_gotoxy();
}

/* fills the display with spaces and resets X/Y (TI protocol) */
static void lcd_clear_fast_tilcd(void)
{
	int pos;
	lcd_addr_x = lcd_addr_y = 0;
	lcd_gotoxy();

	spin_lock(&pprt_lock);
	for (pos = 0; pos < lcd_height * lcd_hwidth; pos++) {
		/* present the data to the data port */
		w_dtr(pprt, ' ');
		udelay(60);
	}
	spin_unlock(&pprt_lock);

	lcd_addr_x = lcd_addr_y = 0;
	lcd_gotoxy();
}

/* clears the display and resets X/Y */
static void lcd_clear_display(void)
{
	lcd_write_cmd(0x01);	/* clear display */
	lcd_addr_x = lcd_addr_y = 0;
	/* we must wait a few milliseconds (15) */
	long_sleep(15);
}

/* standard HD44780 power-up initialization sequence */
static void lcd_init_display(void)
{

	lcd_flags = ((lcd_height > 1) ? LCD_FLAG_N : 0)
	    | LCD_FLAG_D | LCD_FLAG_C | LCD_FLAG_B;

	long_sleep(20);		/* wait 20 ms after power-up for the paranoid */

	lcd_write_cmd(0x30);	/* 8bits, 1 line, small fonts */
	long_sleep(10);

	lcd_write_cmd(0x30);	/* 8bits, 1 line, small fonts */
	long_sleep(10);

	lcd_write_cmd(0x30);	/* 8bits, 1 line, small fonts */
	long_sleep(10);

	lcd_write_cmd(0x30	/* set font height and lines number */
		      | ((lcd_flags & LCD_FLAG_F) ? 4 : 0)
		      | ((lcd_flags & LCD_FLAG_N) ? 8 : 0)
	    );
	long_sleep(10);

	lcd_write_cmd(0x08);	/* display off, cursor off, blink off */
	long_sleep(10);

	lcd_write_cmd(0x08	/* set display mode */
		      | ((lcd_flags & LCD_FLAG_D) ? 4 : 0)
		      | ((lcd_flags & LCD_FLAG_C) ? 2 : 0)
		      | ((lcd_flags & LCD_FLAG_B) ? 1 : 0)
	    );

	lcd_backlight((lcd_flags & LCD_FLAG_L) ?
1 : 0); long_sleep(10); /* entry mode set : increment, cursor shifting */ lcd_write_cmd(0x06); lcd_clear_display(); } /* * These are the file operation function for user access to /dev/lcd * This function can also be called from inside the kernel, by * setting file and ppos to NULL. * */ static inline int handle_lcd_special_code(void) { /* LCD special codes */ int processed = 0; char *esc = lcd_escape + 2; int oldflags = lcd_flags; /* check for display mode flags */ switch (*esc) { case 'D': /* Display ON */ lcd_flags |= LCD_FLAG_D; processed = 1; break; case 'd': /* Display OFF */ lcd_flags &= ~LCD_FLAG_D; processed = 1; break; case 'C': /* Cursor ON */ lcd_flags |= LCD_FLAG_C; processed = 1; break; case 'c': /* Cursor OFF */ lcd_flags &= ~LCD_FLAG_C; processed = 1; break; case 'B': /* Blink ON */ lcd_flags |= LCD_FLAG_B; processed = 1; break; case 'b': /* Blink OFF */ lcd_flags &= ~LCD_FLAG_B; processed = 1; break; case '+': /* Back light ON */ lcd_flags |= LCD_FLAG_L; processed = 1; break; case '-': /* Back light OFF */ lcd_flags &= ~LCD_FLAG_L; processed = 1; break; case '*': /* flash back light using the keypad timer */ if (scan_timer.function != NULL) { if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0)) lcd_backlight(1); light_tempo = FLASH_LIGHT_TEMPO; } processed = 1; break; case 'f': /* Small Font */ lcd_flags &= ~LCD_FLAG_F; processed = 1; break; case 'F': /* Large Font */ lcd_flags |= LCD_FLAG_F; processed = 1; break; case 'n': /* One Line */ lcd_flags &= ~LCD_FLAG_N; processed = 1; break; case 'N': /* Two Lines */ lcd_flags |= LCD_FLAG_N; break; case 'l': /* Shift Cursor Left */ if (lcd_addr_x > 0) { /* back one char if not at end of line */ if (lcd_addr_x < lcd_bwidth) lcd_write_cmd(0x10); lcd_addr_x--; } processed = 1; break; case 'r': /* shift cursor right */ if (lcd_addr_x < lcd_width) { /* allow the cursor to pass the end of the line */ if (lcd_addr_x < (lcd_bwidth - 1)) lcd_write_cmd(0x14); lcd_addr_x++; } processed = 1; break; case 'L': /* 
shift display left */ lcd_left_shift++; lcd_write_cmd(0x18); processed = 1; break; case 'R': /* shift display right */ lcd_left_shift--; lcd_write_cmd(0x1C); processed = 1; break; case 'k': { /* kill end of line */ int x; for (x = lcd_addr_x; x < lcd_bwidth; x++) lcd_write_data(' '); /* restore cursor position */ lcd_gotoxy(); processed = 1; break; } case 'I': /* reinitialize display */ lcd_init_display(); lcd_left_shift = 0; processed = 1; break; case 'G': { /* Generator : LGcxxxxx...xx; must have <c> between '0' * and '7', representing the numerical ASCII code of the * redefined character, and <xx...xx> a sequence of 16 * hex digits representing 8 bytes for each character. * Most LCDs will only use 5 lower bits of the 7 first * bytes. */ unsigned char cgbytes[8]; unsigned char cgaddr; int cgoffset; int shift; char value; int addr; if (strchr(esc, ';') == NULL) break; esc++; cgaddr = *(esc++) - '0'; if (cgaddr > 7) { processed = 1; break; } cgoffset = 0; shift = 0; value = 0; while (*esc && cgoffset < 8) { shift ^= 4; if (*esc >= '0' && *esc <= '9') value |= (*esc - '0') << shift; else if (*esc >= 'A' && *esc <= 'Z') value |= (*esc - 'A' + 10) << shift; else if (*esc >= 'a' && *esc <= 'z') value |= (*esc - 'a' + 10) << shift; else { esc++; continue; } if (shift == 0) { cgbytes[cgoffset++] = value; value = 0; } esc++; } lcd_write_cmd(0x40 | (cgaddr * 8)); for (addr = 0; addr < cgoffset; addr++) lcd_write_data(cgbytes[addr]); /* ensures that we stop writing to CGRAM */ lcd_gotoxy(); processed = 1; break; } case 'x': /* gotoxy : LxXXX[yYYY]; */ case 'y': /* gotoxy : LyYYY[xXXX]; */ if (strchr(esc, ';') == NULL) break; while (*esc) { char *endp; if (*esc == 'x') { esc++; lcd_addr_x = simple_strtoul(esc, &endp, 10); esc = endp; } else if (*esc == 'y') { esc++; lcd_addr_y = simple_strtoul(esc, &endp, 10); esc = endp; } else break; } lcd_gotoxy(); processed = 1; break; } /* Check wether one flag was changed */ if (oldflags != lcd_flags) { /* check whether one of B,C,D 
flags were changed */ if ((oldflags ^ lcd_flags) & (LCD_FLAG_B | LCD_FLAG_C | LCD_FLAG_D)) /* set display mode */ lcd_write_cmd(0x08 | ((lcd_flags & LCD_FLAG_D) ? 4 : 0) | ((lcd_flags & LCD_FLAG_C) ? 2 : 0) | ((lcd_flags & LCD_FLAG_B) ? 1 : 0)); /* check whether one of F,N flags was changed */ else if ((oldflags ^ lcd_flags) & (LCD_FLAG_F | LCD_FLAG_N)) lcd_write_cmd(0x30 | ((lcd_flags & LCD_FLAG_F) ? 4 : 0) | ((lcd_flags & LCD_FLAG_N) ? 8 : 0)); /* check wether L flag was changed */ else if ((oldflags ^ lcd_flags) & (LCD_FLAG_L)) { if (lcd_flags & (LCD_FLAG_L)) lcd_backlight(1); else if (light_tempo == 0) /* switch off the light only when the tempo lighting is gone */ lcd_backlight(0); } } return processed; } static ssize_t lcd_write(struct file *file, const char *buf, size_t count, loff_t *ppos) { const char *tmp = buf; char c; for (; count-- > 0; (ppos ? (*ppos)++ : 0), ++tmp) { if (!in_interrupt() && (((count + 1) & 0x1f) == 0)) /* let's be a little nice with other processes that need some CPU */ schedule(); if (ppos == NULL && file == NULL) /* let's not use get_user() from the kernel ! 
 */
			c = *tmp;
		else if (get_user(c, tmp))
			return -EFAULT;

		/* first, we'll test if we're in escape mode */
		if ((c != '\n') && lcd_escape_len >= 0) {
			/* yes, let's add this char to the buffer */
			lcd_escape[lcd_escape_len++] = c;
			lcd_escape[lcd_escape_len] = 0;
		} else {
			/* aborts any previous escape sequence */
			lcd_escape_len = -1;

			switch (c) {
			case LCD_ESCAPE_CHAR:
				/* start of an escape sequence */
				lcd_escape_len = 0;
				lcd_escape[lcd_escape_len] = 0;
				break;
			case '\b':
				/* go back one char and clear it */
				if (lcd_addr_x > 0) {
					/* check if we're not at the
					   end of the line */
					if (lcd_addr_x < lcd_bwidth)
						/* back one char */
						lcd_write_cmd(0x10);
					lcd_addr_x--;
				}
				/* replace with a space */
				lcd_write_data(' ');
				/* back one char again */
				lcd_write_cmd(0x10);
				break;
			case '\014':
				/* quickly clear the display */
				lcd_clear_fast();
				break;
			case '\n':
				/* flush the remainder of the current line and
				   go to the beginning of the next line */
				for (; lcd_addr_x < lcd_bwidth; lcd_addr_x++)
					lcd_write_data(' ');
				lcd_addr_x = 0;
				lcd_addr_y = (lcd_addr_y + 1) % lcd_height;
				lcd_gotoxy();
				break;
			case '\r':
				/* go to the beginning of the same line */
				lcd_addr_x = 0;
				lcd_gotoxy();
				break;
			case '\t':
				/* print a space instead of the tab */
				lcd_print(' ');
				break;
			default:
				/* simply print this char */
				lcd_print(c);
				break;
			}
		}

		/* now we'll see if we're in an escape mode and if the current
		   escape sequence can be understood. */
		if (lcd_escape_len >= 2) {
			int processed = 0;

			if (!strcmp(lcd_escape, "[2J")) {
				/* clear the display */
				lcd_clear_fast();
				processed = 1;
			} else if (!strcmp(lcd_escape, "[H")) {
				/* cursor to home */
				lcd_addr_x = lcd_addr_y = 0;
				lcd_gotoxy();
				processed = 1;
			}
			/* codes starting with ^[[L */
			else if ((lcd_escape_len >= 3) &&
				 (lcd_escape[0] == '[') &&
				 (lcd_escape[1] == 'L')) {
				processed = handle_lcd_special_code();
			}	/* LCD special escape codes */

			/* flush the escape sequence if it's been processed
			   or if it is getting too long. */
			if (processed || (lcd_escape_len >= LCD_ESCAPE_LEN))
				lcd_escape_len = -1;
		}		/* escape codes */
	}

	return tmp - buf;
}

/* single-open, write-only; clears the display on first open if pending.
 * NOTE(review): the open count is not protected against concurrent
 * opens — confirm callers cannot race here. */
static int lcd_open(struct inode *inode, struct file *file)
{
	if (lcd_open_cnt)
		return -EBUSY;	/* open only once at a time */

	if (file->f_mode & FMODE_READ)	/* device is write-only */
		return -EPERM;

	if (lcd_must_clear) {
		lcd_clear_display();
		lcd_must_clear = 0;
	}
	lcd_open_cnt++;
	return nonseekable_open(inode, file);
}

static int lcd_release(struct inode *inode, struct file *file)
{
	lcd_open_cnt--;
	return 0;
}

static const struct file_operations lcd_fops = {
	.write   = lcd_write,
	.open    = lcd_open,
	.release = lcd_release,
	.llseek  = no_llseek,
};

static struct miscdevice lcd_dev = {
	LCD_MINOR,
	"lcd",
	&lcd_fops
};

/* public function usable from the kernel for any purpose */
void panel_lcd_print(char *s)
{
	if (lcd_enabled && lcd_initialized)
		lcd_write(NULL, s, strlen(s), NULL);
}

/* initialize the LCD driver: resolve defaults for the selected type,
 * pick the protocol accessors, compute pin masks and init the display */
void lcd_init(void)
{
	switch (lcd_type) {
	case LCD_TYPE_OLD:
		/* parallel mode, 8 bits */
		if (lcd_proto < 0)
			lcd_proto = LCD_PROTO_PARALLEL;
		if (lcd_charset < 0)
			lcd_charset = LCD_CHARSET_NORMAL;
		if (lcd_e_pin == PIN_NOT_SET)
			lcd_e_pin = PIN_STROBE;
		if (lcd_rs_pin == PIN_NOT_SET)
			lcd_rs_pin = PIN_AUTOLF;

		if (lcd_width < 0)
			lcd_width = 40;
		if (lcd_bwidth < 0)
			lcd_bwidth = 40;
		if (lcd_hwidth < 0)
			lcd_hwidth = 64;
		if (lcd_height < 0)
			lcd_height = 2;
		break;
	case LCD_TYPE_KS0074:
		/* serial mode, ks0074 */
		if (lcd_proto < 0)
			lcd_proto = LCD_PROTO_SERIAL;
		if (lcd_charset < 0)
			lcd_charset = LCD_CHARSET_KS0074;
		if (lcd_bl_pin == PIN_NOT_SET)
			lcd_bl_pin = PIN_AUTOLF;
		if (lcd_cl_pin == PIN_NOT_SET)
			lcd_cl_pin = PIN_STROBE;
		if (lcd_da_pin == PIN_NOT_SET)
			lcd_da_pin = PIN_D0;

		if (lcd_width < 0)
			lcd_width = 16;
		if (lcd_bwidth < 0)
			lcd_bwidth = 40;
		if (lcd_hwidth < 0)
			lcd_hwidth = 16;
		if (lcd_height < 0)
			lcd_height = 2;
		break;
	case LCD_TYPE_NEXCOM:
		/* parallel mode, 8 bits, generic */
		if (lcd_proto < 0)
			lcd_proto = LCD_PROTO_PARALLEL;
		if (lcd_charset < 0)
			lcd_charset = LCD_CHARSET_NORMAL;
		if (lcd_e_pin == PIN_NOT_SET)
			lcd_e_pin = PIN_AUTOLF;
		if (lcd_rs_pin == PIN_NOT_SET)
			lcd_rs_pin = PIN_SELECP;
		if (lcd_rw_pin == PIN_NOT_SET)
			lcd_rw_pin = PIN_INITP;

		if (lcd_width < 0)
			lcd_width = 16;
		if (lcd_bwidth < 0)
			lcd_bwidth = 40;
		if (lcd_hwidth < 0)
			lcd_hwidth = 64;
		if (lcd_height < 0)
			lcd_height = 2;
		break;
	case LCD_TYPE_CUSTOM:
		/* customer-defined */
		if (lcd_proto < 0)
			lcd_proto = DEFAULT_LCD_PROTO;
		if (lcd_charset < 0)
			lcd_charset = DEFAULT_LCD_CHARSET;
		/* default geometry will be set later */
		break;
	case LCD_TYPE_HANTRONIX:
		/* parallel mode, 8 bits, hantronix-like */
	default:
		if (lcd_proto < 0)
			lcd_proto = LCD_PROTO_PARALLEL;
		if (lcd_charset < 0)
			lcd_charset = LCD_CHARSET_NORMAL;
		if (lcd_e_pin == PIN_NOT_SET)
			lcd_e_pin = PIN_STROBE;
		if (lcd_rs_pin == PIN_NOT_SET)
			lcd_rs_pin = PIN_SELECP;

		if (lcd_width < 0)
			lcd_width = 16;
		if (lcd_bwidth < 0)
			lcd_bwidth = 40;
		if (lcd_hwidth < 0)
			lcd_hwidth = 64;
		if (lcd_height < 0)
			lcd_height = 2;
		break;
	}

	/* this is used to catch wrong and default values */
	if (lcd_width <= 0)
		lcd_width = DEFAULT_LCD_WIDTH;
	if (lcd_bwidth <= 0)
		lcd_bwidth = DEFAULT_LCD_BWIDTH;
	if (lcd_hwidth <= 0)
		lcd_hwidth = DEFAULT_LCD_HWIDTH;
	if (lcd_height <= 0)
		lcd_height = DEFAULT_LCD_HEIGHT;

	if (lcd_proto == LCD_PROTO_SERIAL) {
		/* SERIAL */
		lcd_write_cmd = lcd_write_cmd_s;
		lcd_write_data = lcd_write_data_s;
		lcd_clear_fast = lcd_clear_fast_s;

		if (lcd_cl_pin == PIN_NOT_SET)
			lcd_cl_pin = DEFAULT_LCD_PIN_SCL;
		if (lcd_da_pin == PIN_NOT_SET)
			lcd_da_pin = DEFAULT_LCD_PIN_SDA;

	} else if (lcd_proto == LCD_PROTO_PARALLEL) {
		/* PARALLEL */
		lcd_write_cmd = lcd_write_cmd_p8;
		lcd_write_data = lcd_write_data_p8;
		lcd_clear_fast = lcd_clear_fast_p8;

		if (lcd_e_pin == PIN_NOT_SET)
			lcd_e_pin = DEFAULT_LCD_PIN_E;
		if (lcd_rs_pin == PIN_NOT_SET)
			lcd_rs_pin = DEFAULT_LCD_PIN_RS;
		if (lcd_rw_pin == PIN_NOT_SET)
			lcd_rw_pin = DEFAULT_LCD_PIN_RW;
	} else {
		/* TI LCD interface */
		lcd_write_cmd = lcd_write_cmd_tilcd;
		lcd_write_data = lcd_write_data_tilcd;
		lcd_clear_fast = lcd_clear_fast_tilcd;
	}

	if (lcd_bl_pin == PIN_NOT_SET)
		lcd_bl_pin = DEFAULT_LCD_PIN_BL;

	/* any pin still unspecified is treated as unconnected */
	if (lcd_e_pin == PIN_NOT_SET)
		lcd_e_pin = PIN_NONE;
	if (lcd_rs_pin == PIN_NOT_SET)
		lcd_rs_pin = PIN_NONE;
	if (lcd_rw_pin == PIN_NOT_SET)
		lcd_rw_pin = PIN_NONE;
	if (lcd_bl_pin == PIN_NOT_SET)
		lcd_bl_pin = PIN_NONE;
	if (lcd_cl_pin == PIN_NOT_SET)
		lcd_cl_pin = PIN_NONE;
	if (lcd_da_pin == PIN_NOT_SET)
		lcd_da_pin = PIN_NONE;

	if (lcd_charset < 0)
		lcd_charset = DEFAULT_LCD_CHARSET;

	if (lcd_charset == LCD_CHARSET_KS0074)
		lcd_char_conv = lcd_char_conv_ks0074;
	else
		lcd_char_conv = NULL;

	/* the backlight-flash feature needs the scan timer running */
	if (lcd_bl_pin != PIN_NONE)
		init_scan_timer();

	pin_to_bits(lcd_e_pin, lcd_bits[LCD_PORT_D][LCD_BIT_E],
		    lcd_bits[LCD_PORT_C][LCD_BIT_E]);
	pin_to_bits(lcd_rs_pin, lcd_bits[LCD_PORT_D][LCD_BIT_RS],
		    lcd_bits[LCD_PORT_C][LCD_BIT_RS]);
	pin_to_bits(lcd_rw_pin, lcd_bits[LCD_PORT_D][LCD_BIT_RW],
		    lcd_bits[LCD_PORT_C][LCD_BIT_RW]);
	pin_to_bits(lcd_bl_pin, lcd_bits[LCD_PORT_D][LCD_BIT_BL],
		    lcd_bits[LCD_PORT_C][LCD_BIT_BL]);
	pin_to_bits(lcd_cl_pin, lcd_bits[LCD_PORT_D][LCD_BIT_CL],
		    lcd_bits[LCD_PORT_C][LCD_BIT_CL]);
	pin_to_bits(lcd_da_pin, lcd_bits[LCD_PORT_D][LCD_BIT_DA],
		    lcd_bits[LCD_PORT_C][LCD_BIT_DA]);

	/* before this line, we must NOT send anything to the display.
	 * Since lcd_init_display() needs to write data, we have to
	 * mark the LCD initialized just before.
*/ lcd_initialized = 1; lcd_init_display(); /* display a short message */ #ifdef CONFIG_PANEL_CHANGE_MESSAGE #ifdef CONFIG_PANEL_BOOT_MESSAGE panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE); #endif #else panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\nPanel-" PANEL_VERSION); #endif lcd_addr_x = lcd_addr_y = 0; /* clear the display on the next device opening */ lcd_must_clear = 1; lcd_gotoxy(); } /* * These are the file operation function for user access to /dev/keypad */ static ssize_t keypad_read(struct file *file, char *buf, size_t count, loff_t *ppos) { unsigned i = *ppos; char *tmp = buf; if (keypad_buflen == 0) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; interruptible_sleep_on(&keypad_read_wait); if (signal_pending(current)) return -EINTR; } for (; count-- > 0 && (keypad_buflen > 0); ++i, ++tmp, --keypad_buflen) { put_user(keypad_buffer[keypad_start], tmp); keypad_start = (keypad_start + 1) % KEYPAD_BUFFER; } *ppos = i; return tmp - buf; } static int keypad_open(struct inode *inode, struct file *file) { if (keypad_open_cnt) return -EBUSY; /* open only once at a time */ if (file->f_mode & FMODE_WRITE) /* device is read-only */ return -EPERM; keypad_buflen = 0; /* flush the buffer on opening */ keypad_open_cnt++; return 0; } static int keypad_release(struct inode *inode, struct file *file) { keypad_open_cnt--; return 0; } static const struct file_operations keypad_fops = { .read = keypad_read, /* read */ .open = keypad_open, /* open */ .release = keypad_release, /* close */ .llseek = default_llseek, }; static struct miscdevice keypad_dev = { KEYPAD_MINOR, "keypad", &keypad_fops }; static void keypad_send_key(char *string, int max_len) { if (init_in_progress) return; /* send the key to the device only if a process is attached to it. 
*/ if (keypad_open_cnt > 0) { while (max_len-- && keypad_buflen < KEYPAD_BUFFER && *string) { keypad_buffer[(keypad_start + keypad_buflen++) % KEYPAD_BUFFER] = *string++; } wake_up_interruptible(&keypad_read_wait); } } /* this function scans all the bits involving at least one logical signal, * and puts the results in the bitfield "phys_read" (one bit per established * contact), and sets "phys_read_prev" to "phys_read". * * Note: to debounce input signals, we will only consider as switched a signal * which is stable across 2 measures. Signals which are different between two * reads will be kept as they previously were in their logical form (phys_prev). * A signal which has just switched will have a 1 in * (phys_read ^ phys_read_prev). */ static void phys_scan_contacts(void) { int bit, bitval; char oldval; char bitmask; char gndmask; phys_prev = phys_curr; phys_read_prev = phys_read; phys_read = 0; /* flush all signals */ /* keep track of old value, with all outputs disabled */ oldval = r_dtr(pprt) | scan_mask_o; /* activate all keyboard outputs (active low) */ w_dtr(pprt, oldval & ~scan_mask_o); /* will have a 1 for each bit set to gnd */ bitmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i; /* disable all matrix signals */ w_dtr(pprt, oldval); /* now that all outputs are cleared, the only active input bits are * directly connected to the ground */ /* 1 for each grounded input */ gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i; /* grounded inputs are signals 40-44 */ phys_read |= (pmask_t) gndmask << 40; if (bitmask != gndmask) { /* since clearing the outputs changed some inputs, we know * that some input signals are currently tied to some outputs. * So we'll scan them. 
*/ for (bit = 0; bit < 8; bit++) { bitval = 1 << bit; if (!(scan_mask_o & bitval)) /* skip unused bits */ continue; w_dtr(pprt, oldval & ~bitval); /* enable this output */ bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask; phys_read |= (pmask_t) bitmask << (5 * bit); } w_dtr(pprt, oldval); /* disable all outputs */ } /* this is easy: use old bits when they are flapping, * use new ones when stable */ phys_curr = (phys_prev & (phys_read ^ phys_read_prev)) | (phys_read & ~(phys_read ^ phys_read_prev)); } static inline int input_state_high(struct logical_input *input) { #if 0 /* FIXME: * this is an invalid test. It tries to catch * transitions from single-key to multiple-key, but * doesn't take into account the contacts polarity. * The only solution to the problem is to parse keys * from the most complex to the simplest combinations, * and mark them as 'caught' once a combination * matches, then unmatch it for all other ones. */ /* try to catch dangerous transitions cases : * someone adds a bit, so this signal was a false * positive resulting from a transition. We should * invalidate the signal immediately and not call the * release function. * eg: 0 -(press A)-> A -(press B)-> AB : don't match A's release. 
*/ if (((phys_prev & input->mask) == input->value) && ((phys_curr & input->mask) > input->value)) { input->state = INPUT_ST_LOW; /* invalidate */ return 1; } #endif if ((phys_curr & input->mask) == input->value) { if ((input->type == INPUT_TYPE_STD) && (input->high_timer == 0)) { input->high_timer++; if (input->u.std.press_fct != NULL) input->u.std.press_fct(input->u.std.press_data); } else if (input->type == INPUT_TYPE_KBD) { /* will turn on the light */ keypressed = 1; if (input->high_timer == 0) { char *press_str = input->u.kbd.press_str; if (press_str[0]) keypad_send_key(press_str, sizeof(press_str)); } if (input->u.kbd.repeat_str[0]) { char *repeat_str = input->u.kbd.repeat_str; if (input->high_timer >= KEYPAD_REP_START) { input->high_timer -= KEYPAD_REP_DELAY; keypad_send_key(repeat_str, sizeof(repeat_str)); } /* we will need to come back here soon */ inputs_stable = 0; } if (input->high_timer < 255) input->high_timer++; } return 1; } else { /* else signal falling down. Let's fall through. */ input->state = INPUT_ST_FALLING; input->fall_timer = 0; } return 0; } static inline void input_state_falling(struct logical_input *input) { #if 0 /* FIXME !!! 
same comment as in input_state_high */ if (((phys_prev & input->mask) == input->value) && ((phys_curr & input->mask) > input->value)) { input->state = INPUT_ST_LOW; /* invalidate */ return; } #endif if ((phys_curr & input->mask) == input->value) { if (input->type == INPUT_TYPE_KBD) { /* will turn on the light */ keypressed = 1; if (input->u.kbd.repeat_str[0]) { char *repeat_str = input->u.kbd.repeat_str; if (input->high_timer >= KEYPAD_REP_START) input->high_timer -= KEYPAD_REP_DELAY; keypad_send_key(repeat_str, sizeof(repeat_str)); /* we will need to come back here soon */ inputs_stable = 0; } if (input->high_timer < 255) input->high_timer++; } input->state = INPUT_ST_HIGH; } else if (input->fall_timer >= input->fall_time) { /* call release event */ if (input->type == INPUT_TYPE_STD) { void (*release_fct)(int) = input->u.std.release_fct; if (release_fct != NULL) release_fct(input->u.std.release_data); } else if (input->type == INPUT_TYPE_KBD) { char *release_str = input->u.kbd.release_str; if (release_str[0]) keypad_send_key(release_str, sizeof(release_str)); } input->state = INPUT_ST_LOW; } else { input->fall_timer++; inputs_stable = 0; } } static void panel_process_inputs(void) { struct list_head *item; struct logical_input *input; #if 0 printk(KERN_DEBUG "entering panel_process_inputs with pp=%016Lx & pc=%016Lx\n", phys_prev, phys_curr); #endif keypressed = 0; inputs_stable = 1; list_for_each(item, &logical_inputs) { input = list_entry(item, struct logical_input, list); switch (input->state) { case INPUT_ST_LOW: if ((phys_curr & input->mask) != input->value) break; /* if all needed ones were already set previously, * this means that this logical signal has been * activated by the releasing of another combined * signal, so we don't want to match. * eg: AB -(release B)-> A -(release A)-> 0 : * don't match A. 
*/ if ((phys_prev & input->mask) == input->value) break; input->rise_timer = 0; input->state = INPUT_ST_RISING; /* no break here, fall through */ case INPUT_ST_RISING: if ((phys_curr & input->mask) != input->value) { input->state = INPUT_ST_LOW; break; } if (input->rise_timer < input->rise_time) { inputs_stable = 0; input->rise_timer++; break; } input->high_timer = 0; input->state = INPUT_ST_HIGH; /* no break here, fall through */ case INPUT_ST_HIGH: if (input_state_high(input)) break; /* no break here, fall through */ case INPUT_ST_FALLING: input_state_falling(input); } } } static void panel_scan_timer(void) { if (keypad_enabled && keypad_initialized) { if (spin_trylock(&pprt_lock)) { phys_scan_contacts(); /* no need for the parport anymore */ spin_unlock(&pprt_lock); } if (!inputs_stable || phys_curr != phys_prev) panel_process_inputs(); } if (lcd_enabled && lcd_initialized) { if (keypressed) { if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0)) lcd_backlight(1); light_tempo = FLASH_LIGHT_TEMPO; } else if (light_tempo > 0) { light_tempo--; if (light_tempo == 0 && ((lcd_flags & LCD_FLAG_L) == 0)) lcd_backlight(0); } } mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME); } static void init_scan_timer(void) { if (scan_timer.function != NULL) return; /* already started */ init_timer(&scan_timer); scan_timer.expires = jiffies + INPUT_POLL_TIME; scan_timer.data = 0; scan_timer.function = (void *)&panel_scan_timer; add_timer(&scan_timer); } /* converts a name of the form "({BbAaPpSsEe}{01234567-})*" to a series of bits. * if <omask> or <imask> are non-null, they will be or'ed with the bits * corresponding to out and in bits respectively. * returns 1 if ok, 0 if error (in which case, nothing is written). 
*/ static int input_name2mask(char *name, pmask_t *mask, pmask_t *value, char *imask, char *omask) { static char sigtab[10] = "EeSsPpAaBb"; char im, om; pmask_t m, v; om = im = m = v = 0ULL; while (*name) { int in, out, bit, neg; for (in = 0; (in < sizeof(sigtab)) && (sigtab[in] != *name); in++) ; if (in >= sizeof(sigtab)) return 0; /* input name not found */ neg = (in & 1); /* odd (lower) names are negated */ in >>= 1; im |= (1 << in); name++; if (isdigit(*name)) { out = *name - '0'; om |= (1 << out); } else if (*name == '-') out = 8; else return 0; /* unknown bit name */ bit = (out * 5) + in; m |= 1ULL << bit; if (!neg) v |= 1ULL << bit; name++; } *mask = m; *value = v; if (imask) *imask |= im; if (omask) *omask |= om; return 1; } /* tries to bind a key to the signal name <name>. The key will send the * strings <press>, <repeat>, <release> for these respective events. * Returns the pointer to the new key if ok, NULL if the key could not be bound. */ static struct logical_input *panel_bind_key(char *name, char *press, char *repeat, char *release) { struct logical_input *key; key = kzalloc(sizeof(struct logical_input), GFP_KERNEL); if (!key) { printk(KERN_ERR "panel: not enough memory\n"); return NULL; } if (!input_name2mask(name, &key->mask, &key->value, &scan_mask_i, &scan_mask_o)) { kfree(key); return NULL; } key->type = INPUT_TYPE_KBD; key->state = INPUT_ST_LOW; key->rise_time = 1; key->fall_time = 1; #if 0 printk(KERN_DEBUG "bind: <%s> : m=%016Lx v=%016Lx\n", name, key->mask, key->value); #endif strncpy(key->u.kbd.press_str, press, sizeof(key->u.kbd.press_str)); strncpy(key->u.kbd.repeat_str, repeat, sizeof(key->u.kbd.repeat_str)); strncpy(key->u.kbd.release_str, release, sizeof(key->u.kbd.release_str)); list_add(&key->list, &logical_inputs); return key; } #if 0 /* tries to bind a callback function to the signal name <name>. 
The function * <press_fct> will be called with the <press_data> arg when the signal is * activated, and so on for <release_fct>/<release_data> * Returns the pointer to the new signal if ok, NULL if the signal could not * be bound. */ static struct logical_input *panel_bind_callback(char *name, void (*press_fct) (int), int press_data, void (*release_fct) (int), int release_data) { struct logical_input *callback; callback = kmalloc(sizeof(struct logical_input), GFP_KERNEL); if (!callback) { printk(KERN_ERR "panel: not enough memory\n"); return NULL; } memset(callback, 0, sizeof(struct logical_input)); if (!input_name2mask(name, &callback->mask, &callback->value, &scan_mask_i, &scan_mask_o)) return NULL; callback->type = INPUT_TYPE_STD; callback->state = INPUT_ST_LOW; callback->rise_time = 1; callback->fall_time = 1; callback->u.std.press_fct = press_fct; callback->u.std.press_data = press_data; callback->u.std.release_fct = release_fct; callback->u.std.release_data = release_data; list_add(&callback->list, &logical_inputs); return callback; } #endif static void keypad_init(void) { int keynum; init_waitqueue_head(&keypad_read_wait); keypad_buflen = 0; /* flushes any eventual noisy keystroke */ /* Let's create all known keys */ for (keynum = 0; keypad_profile[keynum][0][0]; keynum++) { panel_bind_key(keypad_profile[keynum][0], keypad_profile[keynum][1], keypad_profile[keynum][2], keypad_profile[keynum][3]); } init_scan_timer(); keypad_initialized = 1; } /**************************************************/ /* device initialization */ /**************************************************/ static int panel_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (lcd_enabled && lcd_initialized) { switch (code) { case SYS_DOWN: panel_lcd_print ("\x0cReloading\nSystem...\x1b[Lc\x1b[Lb\x1b[L+"); break; case SYS_HALT: panel_lcd_print ("\x0cSystem Halted.\x1b[Lc\x1b[Lb\x1b[L+"); break; case SYS_POWER_OFF: panel_lcd_print("\x0cPower 
off.\x1b[Lc\x1b[Lb\x1b[L+"); break; default: break; } } return NOTIFY_DONE; } static struct notifier_block panel_notifier = { panel_notify_sys, NULL, 0 }; static void panel_attach(struct parport *port) { if (port->number != parport) return; if (pprt) { printk(KERN_ERR "panel_attach(): port->number=%d parport=%d, " "already registered !\n", port->number, parport); return; } pprt = parport_register_device(port, "panel", NULL, NULL, /* pf, kf */ NULL, /*PARPORT_DEV_EXCL */ 0, (void *)&pprt); if (pprt == NULL) { pr_err("panel_attach(): port->number=%d parport=%d, " "parport_register_device() failed\n", port->number, parport); return; } if (parport_claim(pprt)) { printk(KERN_ERR "Panel: could not claim access to parport%d. " "Aborting.\n", parport); goto err_unreg_device; } /* must init LCD first, just in case an IRQ from the keypad is * generated at keypad init */ if (lcd_enabled) { lcd_init(); if (misc_register(&lcd_dev)) goto err_unreg_device; } if (keypad_enabled) { keypad_init(); if (misc_register(&keypad_dev)) goto err_lcd_unreg; } return; err_lcd_unreg: if (lcd_enabled) misc_deregister(&lcd_dev); err_unreg_device: parport_unregister_device(pprt); pprt = NULL; } static void panel_detach(struct parport *port) { if (port->number != parport) return; if (!pprt) { printk(KERN_ERR "panel_detach(): port->number=%d parport=%d, " "nothing to unregister.\n", port->number, parport); return; } if (keypad_enabled && keypad_initialized) { misc_deregister(&keypad_dev); keypad_initialized = 0; } if (lcd_enabled && lcd_initialized) { misc_deregister(&lcd_dev); lcd_initialized = 0; } parport_release(pprt); parport_unregister_device(pprt); pprt = NULL; } static struct parport_driver panel_driver = { .name = "panel", .attach = panel_attach, .detach = panel_detach, }; /* init function */ int panel_init(void) { /* for backwards compatibility */ if (keypad_type < 0) keypad_type = keypad_enabled; if (lcd_type < 0) lcd_type = lcd_enabled; if (parport < 0) parport = DEFAULT_PARPORT; /* 
take care of an eventual profile */ switch (profile) { case PANEL_PROFILE_CUSTOM: /* custom profile */ if (keypad_type < 0) keypad_type = DEFAULT_KEYPAD; if (lcd_type < 0) lcd_type = DEFAULT_LCD; break; case PANEL_PROFILE_OLD: /* 8 bits, 2*16, old keypad */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_OLD; if (lcd_type < 0) lcd_type = LCD_TYPE_OLD; if (lcd_width < 0) lcd_width = 16; if (lcd_hwidth < 0) lcd_hwidth = 16; break; case PANEL_PROFILE_NEW: /* serial, 2*16, new keypad */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_NEW; if (lcd_type < 0) lcd_type = LCD_TYPE_KS0074; break; case PANEL_PROFILE_HANTRONIX: /* 8 bits, 2*16 hantronix-like, no keypad */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_NONE; if (lcd_type < 0) lcd_type = LCD_TYPE_HANTRONIX; break; case PANEL_PROFILE_NEXCOM: /* generic 8 bits, 2*16, nexcom keypad, eg. Nexcom. */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_NEXCOM; if (lcd_type < 0) lcd_type = LCD_TYPE_NEXCOM; break; case PANEL_PROFILE_LARGE: /* 8 bits, 2*40, old keypad */ if (keypad_type < 0) keypad_type = KEYPAD_TYPE_OLD; if (lcd_type < 0) lcd_type = LCD_TYPE_OLD; break; } lcd_enabled = (lcd_type > 0); keypad_enabled = (keypad_type > 0); switch (keypad_type) { case KEYPAD_TYPE_OLD: keypad_profile = old_keypad_profile; break; case KEYPAD_TYPE_NEW: keypad_profile = new_keypad_profile; break; case KEYPAD_TYPE_NEXCOM: keypad_profile = nexcom_keypad_profile; break; default: keypad_profile = NULL; break; } /* tells various subsystems about the fact that we are initializing */ init_in_progress = 1; if (parport_register_driver(&panel_driver)) { printk(KERN_ERR "Panel: could not register with parport. 
Aborting.\n"); return -EIO; } if (!lcd_enabled && !keypad_enabled) { /* no device enabled, let's release the parport */ if (pprt) { parport_release(pprt); parport_unregister_device(pprt); pprt = NULL; } parport_unregister_driver(&panel_driver); printk(KERN_ERR "Panel driver version " PANEL_VERSION " disabled.\n"); return -ENODEV; } register_reboot_notifier(&panel_notifier); if (pprt) printk(KERN_INFO "Panel driver version " PANEL_VERSION " registered on parport%d (io=0x%lx).\n", parport, pprt->port->base); else printk(KERN_INFO "Panel driver version " PANEL_VERSION " not yet registered\n"); /* tells various subsystems about the fact that initialization is finished */ init_in_progress = 0; return 0; } static int __init panel_init_module(void) { return panel_init(); } static void __exit panel_cleanup_module(void) { unregister_reboot_notifier(&panel_notifier); if (scan_timer.function != NULL) del_timer(&scan_timer); if (pprt != NULL) { if (keypad_enabled) { misc_deregister(&keypad_dev); keypad_initialized = 0; } if (lcd_enabled) { panel_lcd_print("\x0cLCD driver " PANEL_VERSION "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-"); misc_deregister(&lcd_dev); lcd_initialized = 0; } /* TODO: free all input signals */ parport_release(pprt); parport_unregister_device(pprt); pprt = NULL; } parport_unregister_driver(&panel_driver); } module_init(panel_init_module); module_exit(panel_cleanup_module); MODULE_AUTHOR("Willy Tarreau"); MODULE_LICENSE("GPL"); /* * Local variables: * c-indent-level: 4 * tab-width: 8 * End: */
gpl-2.0
Zenfone2-development/android_kernel_asus_moorefield
drivers/media/tuners/tda8290.c
3202
25234
/* i2c tv tuner chip device driver controls the philips tda8290+75 tuner chip combo. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. This "tda8290" module was split apart from the original "tuner" module. */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/videodev2.h> #include "tuner-i2c.h" #include "tda8290.h" #include "tda827x.h" #include "tda18271.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable verbose debug messages"); static int deemphasis_50; module_param(deemphasis_50, int, 0644); MODULE_PARM_DESC(deemphasis_50, "0 - 75us deemphasis; 1 - 50us deemphasis"); /* ---------------------------------------------------------------------- */ struct tda8290_priv { struct tuner_i2c_props i2c_props; unsigned char tda8290_easy_mode; unsigned char tda827x_addr; unsigned char ver; #define TDA8290 1 #define TDA8295 2 #define TDA8275 4 #define TDA8275A 8 #define TDA18271 16 struct tda827x_config cfg; struct tda18271_std_map *tda18271_std_map; }; /*---------------------------------------------------------------------*/ static int tda8290_i2c_bridge(struct dvb_frontend *fe, int close) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char enable[2] = { 0x21, 0xC0 }; unsigned char disable[2] = { 0x21, 0x00 }; unsigned char *msg; if (close) { msg = enable; tuner_i2c_xfer_send(&priv->i2c_props, 
msg, 2); /* let the bridge stabilize */ msleep(20); } else { msg = disable; tuner_i2c_xfer_send(&priv->i2c_props, msg, 2); } return 0; } static int tda8295_i2c_bridge(struct dvb_frontend *fe, int close) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char enable[2] = { 0x45, 0xc1 }; unsigned char disable[2] = { 0x46, 0x00 }; unsigned char buf[3] = { 0x45, 0x01, 0x00 }; unsigned char *msg; if (close) { msg = enable; tuner_i2c_xfer_send(&priv->i2c_props, msg, 2); /* let the bridge stabilize */ msleep(20); } else { msg = disable; tuner_i2c_xfer_send_recv(&priv->i2c_props, msg, 1, &msg[1], 1); buf[2] = msg[1]; buf[2] &= ~0x04; tuner_i2c_xfer_send(&priv->i2c_props, buf, 3); msleep(5); msg[1] |= 0x04; tuner_i2c_xfer_send(&priv->i2c_props, msg, 2); } return 0; } /*---------------------------------------------------------------------*/ static void set_audio(struct dvb_frontend *fe, struct analog_parameters *params) { struct tda8290_priv *priv = fe->analog_demod_priv; char* mode; if (params->std & V4L2_STD_MN) { priv->tda8290_easy_mode = 0x01; mode = "MN"; } else if (params->std & V4L2_STD_B) { priv->tda8290_easy_mode = 0x02; mode = "B"; } else if (params->std & V4L2_STD_GH) { priv->tda8290_easy_mode = 0x04; mode = "GH"; } else if (params->std & V4L2_STD_PAL_I) { priv->tda8290_easy_mode = 0x08; mode = "I"; } else if (params->std & V4L2_STD_DK) { priv->tda8290_easy_mode = 0x10; mode = "DK"; } else if (params->std & V4L2_STD_SECAM_L) { priv->tda8290_easy_mode = 0x20; mode = "L"; } else if (params->std & V4L2_STD_SECAM_LC) { priv->tda8290_easy_mode = 0x40; mode = "LC"; } else { priv->tda8290_easy_mode = 0x10; mode = "xx"; } if (params->mode == V4L2_TUNER_RADIO) { /* Set TDA8295 to FM radio; Start TDA8290 with MN values */ priv->tda8290_easy_mode = (priv->ver & TDA8295) ? 
0x80 : 0x01; tuner_dbg("setting to radio FM\n"); } else { tuner_dbg("setting tda829x to system %s\n", mode); } } static struct { unsigned char seq[2]; } fm_mode[] = { { { 0x01, 0x81} }, /* Put device into expert mode */ { { 0x03, 0x48} }, /* Disable NOTCH and VIDEO filters */ { { 0x04, 0x04} }, /* Disable color carrier filter (SSIF) */ { { 0x05, 0x04} }, /* ADC headroom */ { { 0x06, 0x10} }, /* group delay flat */ { { 0x07, 0x00} }, /* use the same radio DTO values as a tda8295 */ { { 0x08, 0x00} }, { { 0x09, 0x80} }, { { 0x0a, 0xda} }, { { 0x0b, 0x4b} }, { { 0x0c, 0x68} }, { { 0x0d, 0x00} }, /* PLL off, no video carrier detect */ { { 0x14, 0x00} }, /* disable auto mute if no video */ }; static void tda8290_set_params(struct dvb_frontend *fe, struct analog_parameters *params) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char soft_reset[] = { 0x00, 0x00 }; unsigned char easy_mode[] = { 0x01, priv->tda8290_easy_mode }; unsigned char expert_mode[] = { 0x01, 0x80 }; unsigned char agc_out_on[] = { 0x02, 0x00 }; unsigned char gainset_off[] = { 0x28, 0x14 }; unsigned char if_agc_spd[] = { 0x0f, 0x88 }; unsigned char adc_head_6[] = { 0x05, 0x04 }; unsigned char adc_head_9[] = { 0x05, 0x02 }; unsigned char adc_head_12[] = { 0x05, 0x01 }; unsigned char pll_bw_nom[] = { 0x0d, 0x47 }; unsigned char pll_bw_low[] = { 0x0d, 0x27 }; unsigned char gainset_2[] = { 0x28, 0x64 }; unsigned char agc_rst_on[] = { 0x0e, 0x0b }; unsigned char agc_rst_off[] = { 0x0e, 0x09 }; unsigned char if_agc_set[] = { 0x0f, 0x81 }; unsigned char addr_adc_sat = 0x1a; unsigned char addr_agc_stat = 0x1d; unsigned char addr_pll_stat = 0x1b; unsigned char adc_sat, agc_stat, pll_stat; int i; set_audio(fe, params); if (priv->cfg.config) tuner_dbg("tda827xa config is 0x%02x\n", priv->cfg.config); tuner_i2c_xfer_send(&priv->i2c_props, easy_mode, 2); tuner_i2c_xfer_send(&priv->i2c_props, agc_out_on, 2); tuner_i2c_xfer_send(&priv->i2c_props, soft_reset, 2); msleep(1); if (params->mode == 
V4L2_TUNER_RADIO) { unsigned char deemphasis[] = { 0x13, 1 }; /* FIXME: allow using a different deemphasis */ if (deemphasis_50) deemphasis[1] = 2; for (i = 0; i < ARRAY_SIZE(fm_mode); i++) tuner_i2c_xfer_send(&priv->i2c_props, fm_mode[i].seq, 2); tuner_i2c_xfer_send(&priv->i2c_props, deemphasis, 2); } else { expert_mode[1] = priv->tda8290_easy_mode + 0x80; tuner_i2c_xfer_send(&priv->i2c_props, expert_mode, 2); tuner_i2c_xfer_send(&priv->i2c_props, gainset_off, 2); tuner_i2c_xfer_send(&priv->i2c_props, if_agc_spd, 2); if (priv->tda8290_easy_mode & 0x60) tuner_i2c_xfer_send(&priv->i2c_props, adc_head_9, 2); else tuner_i2c_xfer_send(&priv->i2c_props, adc_head_6, 2); tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2); } if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 1); if (fe->ops.tuner_ops.set_analog_params) fe->ops.tuner_ops.set_analog_params(fe, params); for (i = 0; i < 3; i++) { tuner_i2c_xfer_send_recv(&priv->i2c_props, &addr_pll_stat, 1, &pll_stat, 1); if (pll_stat & 0x80) { tuner_i2c_xfer_send_recv(&priv->i2c_props, &addr_adc_sat, 1, &adc_sat, 1); tuner_i2c_xfer_send_recv(&priv->i2c_props, &addr_agc_stat, 1, &agc_stat, 1); tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat); break; } else { tuner_dbg("tda8290 not locked, no signal?\n"); msleep(100); } } /* adjust headroom resp. gain */ if ((agc_stat > 115) || (!(pll_stat & 0x80) && (adc_sat < 20))) { tuner_dbg("adjust gain, step 1. Agc: %d, ADC stat: %d, lock: %d\n", agc_stat, adc_sat, pll_stat & 0x80); tuner_i2c_xfer_send(&priv->i2c_props, gainset_2, 2); msleep(100); tuner_i2c_xfer_send_recv(&priv->i2c_props, &addr_agc_stat, 1, &agc_stat, 1); tuner_i2c_xfer_send_recv(&priv->i2c_props, &addr_pll_stat, 1, &pll_stat, 1); if ((agc_stat > 115) || !(pll_stat & 0x80)) { tuner_dbg("adjust gain, step 2. 
Agc: %d, lock: %d\n", agc_stat, pll_stat & 0x80); if (priv->cfg.agcf) priv->cfg.agcf(fe); msleep(100); tuner_i2c_xfer_send_recv(&priv->i2c_props, &addr_agc_stat, 1, &agc_stat, 1); tuner_i2c_xfer_send_recv(&priv->i2c_props, &addr_pll_stat, 1, &pll_stat, 1); if((agc_stat > 115) || !(pll_stat & 0x80)) { tuner_dbg("adjust gain, step 3. Agc: %d\n", agc_stat); tuner_i2c_xfer_send(&priv->i2c_props, adc_head_12, 2); tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_low, 2); msleep(100); } } } /* l/ l' deadlock? */ if(priv->tda8290_easy_mode & 0x60) { tuner_i2c_xfer_send_recv(&priv->i2c_props, &addr_adc_sat, 1, &adc_sat, 1); tuner_i2c_xfer_send_recv(&priv->i2c_props, &addr_pll_stat, 1, &pll_stat, 1); if ((adc_sat > 20) || !(pll_stat & 0x80)) { tuner_dbg("trying to resolve SECAM L deadlock\n"); tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_on, 2); msleep(40); tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_off, 2); } } if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 0); tuner_i2c_xfer_send(&priv->i2c_props, if_agc_set, 2); } /*---------------------------------------------------------------------*/ static void tda8295_power(struct dvb_frontend *fe, int enable) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char buf[] = { 0x30, 0x00 }; /* clb_stdbt */ tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1); if (enable) buf[1] = 0x01; else buf[1] = 0x03; tuner_i2c_xfer_send(&priv->i2c_props, buf, 2); } static void tda8295_set_easy_mode(struct dvb_frontend *fe, int enable) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char buf[] = { 0x01, 0x00 }; tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1); if (enable) buf[1] = 0x01; /* rising edge sets regs 0x02 - 0x23 */ else buf[1] = 0x00; /* reset active bit */ tuner_i2c_xfer_send(&priv->i2c_props, buf, 2); } static void tda8295_set_video_std(struct dvb_frontend *fe) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char buf[] = { 0x00, 
priv->tda8290_easy_mode }; tuner_i2c_xfer_send(&priv->i2c_props, buf, 2); tda8295_set_easy_mode(fe, 1); msleep(20); tda8295_set_easy_mode(fe, 0); } /*---------------------------------------------------------------------*/ static void tda8295_agc1_out(struct dvb_frontend *fe, int enable) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char buf[] = { 0x02, 0x00 }; /* DIV_FUNC */ tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1); if (enable) buf[1] &= ~0x40; else buf[1] |= 0x40; tuner_i2c_xfer_send(&priv->i2c_props, buf, 2); } static void tda8295_agc2_out(struct dvb_frontend *fe, int enable) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char set_gpio_cf[] = { 0x44, 0x00 }; unsigned char set_gpio_val[] = { 0x46, 0x00 }; tuner_i2c_xfer_send_recv(&priv->i2c_props, &set_gpio_cf[0], 1, &set_gpio_cf[1], 1); tuner_i2c_xfer_send_recv(&priv->i2c_props, &set_gpio_val[0], 1, &set_gpio_val[1], 1); set_gpio_cf[1] &= 0xf0; /* clear GPIO_0 bits 3-0 */ if (enable) { set_gpio_cf[1] |= 0x01; /* config GPIO_0 as Open Drain Out */ set_gpio_val[1] &= 0xfe; /* set GPIO_0 pin low */ } tuner_i2c_xfer_send(&priv->i2c_props, set_gpio_cf, 2); tuner_i2c_xfer_send(&priv->i2c_props, set_gpio_val, 2); } static int tda8295_has_signal(struct dvb_frontend *fe, u16 *signal) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char hvpll_stat = 0x26; unsigned char ret; tuner_i2c_xfer_send_recv(&priv->i2c_props, &hvpll_stat, 1, &ret, 1); *signal = (ret & 0x01) ? 
65535 : 0; return 0; } /*---------------------------------------------------------------------*/ static void tda8295_set_params(struct dvb_frontend *fe, struct analog_parameters *params) { struct tda8290_priv *priv = fe->analog_demod_priv; u16 signal = 0; unsigned char blanking_mode[] = { 0x1d, 0x00 }; set_audio(fe, params); tuner_dbg("%s: freq = %d\n", __func__, params->frequency); tda8295_power(fe, 1); tda8295_agc1_out(fe, 1); tuner_i2c_xfer_send_recv(&priv->i2c_props, &blanking_mode[0], 1, &blanking_mode[1], 1); tda8295_set_video_std(fe); blanking_mode[1] = 0x03; tuner_i2c_xfer_send(&priv->i2c_props, blanking_mode, 2); msleep(20); if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 1); if (fe->ops.tuner_ops.set_analog_params) fe->ops.tuner_ops.set_analog_params(fe, params); if (priv->cfg.agcf) priv->cfg.agcf(fe); tda8295_has_signal(fe, &signal); if (signal) tuner_dbg("tda8295 is locked\n"); else tuner_dbg("tda8295 not locked, no signal?\n"); if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 0); } /*---------------------------------------------------------------------*/ static int tda8290_has_signal(struct dvb_frontend *fe, u16 *signal) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char i2c_get_afc[1] = { 0x1B }; unsigned char afc = 0; tuner_i2c_xfer_send_recv(&priv->i2c_props, i2c_get_afc, ARRAY_SIZE(i2c_get_afc), &afc, 1); *signal = (afc & 0x80) ? 
65535 : 0; return 0; } /*---------------------------------------------------------------------*/ static void tda8290_standby(struct dvb_frontend *fe) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char cb1[] = { 0x30, 0xD0 }; unsigned char tda8290_standby[] = { 0x00, 0x02 }; unsigned char tda8290_agc_tri[] = { 0x02, 0x20 }; struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags=0, .buf=cb1, .len = 2}; if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 1); if (priv->ver & TDA8275A) cb1[1] = 0x90; i2c_transfer(priv->i2c_props.adap, &msg, 1); if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 0); tuner_i2c_xfer_send(&priv->i2c_props, tda8290_agc_tri, 2); tuner_i2c_xfer_send(&priv->i2c_props, tda8290_standby, 2); } static void tda8295_standby(struct dvb_frontend *fe) { tda8295_agc1_out(fe, 0); /* Put AGC in tri-state */ tda8295_power(fe, 0); } static void tda8290_init_if(struct dvb_frontend *fe) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char set_VS[] = { 0x30, 0x6F }; unsigned char set_GP00_CF[] = { 0x20, 0x01 }; unsigned char set_GP01_CF[] = { 0x20, 0x0B }; if ((priv->cfg.config == TDA8290_LNA_GP0_HIGH_ON) || (priv->cfg.config == TDA8290_LNA_GP0_HIGH_OFF)) tuner_i2c_xfer_send(&priv->i2c_props, set_GP00_CF, 2); else tuner_i2c_xfer_send(&priv->i2c_props, set_GP01_CF, 2); tuner_i2c_xfer_send(&priv->i2c_props, set_VS, 2); } static void tda8295_init_if(struct dvb_frontend *fe) { struct tda8290_priv *priv = fe->analog_demod_priv; static unsigned char set_adc_ctl[] = { 0x33, 0x14 }; static unsigned char set_adc_ctl2[] = { 0x34, 0x00 }; static unsigned char set_pll_reg6[] = { 0x3e, 0x63 }; static unsigned char set_pll_reg0[] = { 0x38, 0x23 }; static unsigned char set_pll_reg7[] = { 0x3f, 0x01 }; static unsigned char set_pll_reg10[] = { 0x42, 0x61 }; static unsigned char set_gpio_reg0[] = { 0x44, 0x0b }; tda8295_power(fe, 1); tda8295_set_easy_mode(fe, 0); tda8295_set_video_std(fe); 
tuner_i2c_xfer_send(&priv->i2c_props, set_adc_ctl, 2); tuner_i2c_xfer_send(&priv->i2c_props, set_adc_ctl2, 2); tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg6, 2); tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg0, 2); tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg7, 2); tuner_i2c_xfer_send(&priv->i2c_props, set_pll_reg10, 2); tuner_i2c_xfer_send(&priv->i2c_props, set_gpio_reg0, 2); tda8295_agc1_out(fe, 0); tda8295_agc2_out(fe, 0); } static void tda8290_init_tuner(struct dvb_frontend *fe) { struct tda8290_priv *priv = fe->analog_demod_priv; unsigned char tda8275_init[] = { 0x00, 0x00, 0x00, 0x40, 0xdC, 0x04, 0xAf, 0x3F, 0x2A, 0x04, 0xFF, 0x00, 0x00, 0x40 }; unsigned char tda8275a_init[] = { 0x00, 0x00, 0x00, 0x00, 0xdC, 0x05, 0x8b, 0x0c, 0x04, 0x20, 0xFF, 0x00, 0x00, 0x4b }; struct i2c_msg msg = {.addr = priv->tda827x_addr, .flags=0, .buf=tda8275_init, .len = 14}; if (priv->ver & TDA8275A) msg.buf = tda8275a_init; if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 1); i2c_transfer(priv->i2c_props.adap, &msg, 1); if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 0); } /*---------------------------------------------------------------------*/ static void tda829x_release(struct dvb_frontend *fe) { struct tda8290_priv *priv = fe->analog_demod_priv; /* only try to release the tuner if we've * attached it from within this module */ if (priv->ver & (TDA18271 | TDA8275 | TDA8275A)) if (fe->ops.tuner_ops.release) fe->ops.tuner_ops.release(fe); kfree(fe->analog_demod_priv); fe->analog_demod_priv = NULL; } static struct tda18271_config tda829x_tda18271_config = { .gate = TDA18271_GATE_ANALOG, }; static int tda829x_find_tuner(struct dvb_frontend *fe) { struct tda8290_priv *priv = fe->analog_demod_priv; int i, ret, tuners_found; u32 tuner_addrs; u8 data; struct i2c_msg msg = { .flags = I2C_M_RD, .buf = &data, .len = 1 }; if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 1); /* probe for tuner chip 
*/ tuners_found = 0; tuner_addrs = 0; for (i = 0x60; i <= 0x63; i++) { msg.addr = i; ret = i2c_transfer(priv->i2c_props.adap, &msg, 1); if (ret == 1) { tuners_found++; tuner_addrs = (tuner_addrs << 8) + i; } } /* if there is more than one tuner, we expect the right one is behind the bridge and we choose the highest address that doesn't give a response now */ if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 0); if (tuners_found > 1) for (i = 0; i < tuners_found; i++) { msg.addr = tuner_addrs & 0xff; ret = i2c_transfer(priv->i2c_props.adap, &msg, 1); if (ret == 1) tuner_addrs = tuner_addrs >> 8; else break; } if (tuner_addrs == 0) { tuner_addrs = 0x60; tuner_info("could not clearly identify tuner address, " "defaulting to %x\n", tuner_addrs); } else { tuner_addrs = tuner_addrs & 0xff; tuner_info("setting tuner address to %x\n", tuner_addrs); } priv->tda827x_addr = tuner_addrs; msg.addr = tuner_addrs; if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 1); ret = i2c_transfer(priv->i2c_props.adap, &msg, 1); if (ret != 1) { tuner_warn("tuner access failed!\n"); if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 0); return -EREMOTEIO; } if ((data == 0x83) || (data == 0x84)) { priv->ver |= TDA18271; tda829x_tda18271_config.config = priv->cfg.config; tda829x_tda18271_config.std_map = priv->tda18271_std_map; dvb_attach(tda18271_attach, fe, priv->tda827x_addr, priv->i2c_props.adap, &tda829x_tda18271_config); } else { if ((data & 0x3c) == 0) priv->ver |= TDA8275; else priv->ver |= TDA8275A; dvb_attach(tda827x_attach, fe, priv->tda827x_addr, priv->i2c_props.adap, &priv->cfg); priv->cfg.switch_addr = priv->i2c_props.addr; } if (fe->ops.tuner_ops.init) fe->ops.tuner_ops.init(fe); if (fe->ops.tuner_ops.sleep) fe->ops.tuner_ops.sleep(fe); if (fe->ops.analog_ops.i2c_gate_ctrl) fe->ops.analog_ops.i2c_gate_ctrl(fe, 0); return 0; } static int tda8290_probe(struct tuner_i2c_props *i2c_props) { #define TDA8290_ID 
0x89 u8 reg = 0x1f, id; struct i2c_msg msg_read[] = { { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, }; /* detect tda8290 */ if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", __func__, reg); return -ENODEV; } if (id == TDA8290_ID) { if (debug) printk(KERN_DEBUG "%s: tda8290 detected @ %d-%04x\n", __func__, i2c_adapter_id(i2c_props->adap), i2c_props->addr); return 0; } return -ENODEV; } static int tda8295_probe(struct tuner_i2c_props *i2c_props) { #define TDA8295_ID 0x8a #define TDA8295C2_ID 0x8b u8 reg = 0x2f, id; struct i2c_msg msg_read[] = { { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, }; /* detect tda8295 */ if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", __func__, reg); return -ENODEV; } if ((id & 0xfe) == TDA8295_ID) { if (debug) printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n", __func__, (id == TDA8295_ID) ? 
"tda8295c1" : "tda8295c2", i2c_adapter_id(i2c_props->adap), i2c_props->addr); return 0; } return -ENODEV; } static struct analog_demod_ops tda8290_ops = { .set_params = tda8290_set_params, .has_signal = tda8290_has_signal, .standby = tda8290_standby, .release = tda829x_release, .i2c_gate_ctrl = tda8290_i2c_bridge, }; static struct analog_demod_ops tda8295_ops = { .set_params = tda8295_set_params, .has_signal = tda8295_has_signal, .standby = tda8295_standby, .release = tda829x_release, .i2c_gate_ctrl = tda8295_i2c_bridge, }; struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c_adap, u8 i2c_addr, struct tda829x_config *cfg) { struct tda8290_priv *priv = NULL; char *name; priv = kzalloc(sizeof(struct tda8290_priv), GFP_KERNEL); if (priv == NULL) return NULL; fe->analog_demod_priv = priv; priv->i2c_props.addr = i2c_addr; priv->i2c_props.adap = i2c_adap; priv->i2c_props.name = "tda829x"; if (cfg) { priv->cfg.config = cfg->lna_cfg; priv->tda18271_std_map = cfg->tda18271_std_map; } if (tda8290_probe(&priv->i2c_props) == 0) { priv->ver = TDA8290; memcpy(&fe->ops.analog_ops, &tda8290_ops, sizeof(struct analog_demod_ops)); } if (tda8295_probe(&priv->i2c_props) == 0) { priv->ver = TDA8295; memcpy(&fe->ops.analog_ops, &tda8295_ops, sizeof(struct analog_demod_ops)); } if (cfg && cfg->no_i2c_gate) fe->ops.analog_ops.i2c_gate_ctrl = NULL; if (!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) { tda8295_power(fe, 1); if (tda829x_find_tuner(fe) < 0) goto fail; } switch (priv->ver) { case TDA8290: name = "tda8290"; break; case TDA8295: name = "tda8295"; break; case TDA8290 | TDA8275: name = "tda8290+75"; break; case TDA8295 | TDA8275: name = "tda8295+75"; break; case TDA8290 | TDA8275A: name = "tda8290+75a"; break; case TDA8295 | TDA8275A: name = "tda8295+75a"; break; case TDA8290 | TDA18271: name = "tda8290+18271"; break; case TDA8295 | TDA18271: name = "tda8295+18271"; break; default: goto fail; } tuner_info("type set to %s\n", name); 
fe->ops.analog_ops.info.name = name; if (priv->ver & TDA8290) { if (priv->ver & (TDA8275 | TDA8275A)) tda8290_init_tuner(fe); tda8290_init_if(fe); } else if (priv->ver & TDA8295) tda8295_init_if(fe); return fe; fail: memset(&fe->ops.analog_ops, 0, sizeof(struct analog_demod_ops)); tda829x_release(fe); return NULL; } EXPORT_SYMBOL_GPL(tda829x_attach); int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr) { struct tuner_i2c_props i2c_props = { .adap = i2c_adap, .addr = i2c_addr, }; unsigned char soft_reset[] = { 0x00, 0x00 }; unsigned char easy_mode_b[] = { 0x01, 0x02 }; unsigned char easy_mode_g[] = { 0x01, 0x04 }; unsigned char restore_9886[] = { 0x00, 0xd6, 0x30 }; unsigned char addr_dto_lsb = 0x07; unsigned char data; #define PROBE_BUFFER_SIZE 8 unsigned char buf[PROBE_BUFFER_SIZE]; int i; /* rule out tda9887, which would return the same byte repeatedly */ tuner_i2c_xfer_send_recv(&i2c_props, soft_reset, 1, buf, PROBE_BUFFER_SIZE); for (i = 1; i < PROBE_BUFFER_SIZE; i++) { if (buf[i] != buf[0]) break; } /* all bytes are equal, not a tda829x - probably a tda9887 */ if (i == PROBE_BUFFER_SIZE) return -ENODEV; if ((tda8290_probe(&i2c_props) == 0) || (tda8295_probe(&i2c_props) == 0)) return 0; /* fall back to old probing method */ tuner_i2c_xfer_send(&i2c_props, easy_mode_b, 2); tuner_i2c_xfer_send(&i2c_props, soft_reset, 2); tuner_i2c_xfer_send_recv(&i2c_props, &addr_dto_lsb, 1, &data, 1); if (data == 0) { tuner_i2c_xfer_send(&i2c_props, easy_mode_g, 2); tuner_i2c_xfer_send(&i2c_props, soft_reset, 2); tuner_i2c_xfer_send_recv(&i2c_props, &addr_dto_lsb, 1, &data, 1); if (data == 0x7b) { return 0; } } tuner_i2c_xfer_send(&i2c_props, restore_9886, 3); return -ENODEV; } EXPORT_SYMBOL_GPL(tda829x_probe); MODULE_DESCRIPTION("Philips/NXP TDA8290/TDA8295 analog IF demodulator driver"); MODULE_AUTHOR("Gerd Knorr, Hartmut Hackmann, Michael Krufky"); MODULE_LICENSE("GPL"); /* * Overrides for Emacs so that we follow Linus's tabbing style. 
* --------------------------------------------------------------------------- * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
Framework43/touchpad-kernel
fs/reiserfs/dir.c
3458
9045
/* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ #include <linux/string.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/reiserfs_fs.h> #include <linux/stat.h> #include <linux/buffer_head.h> #include <linux/slab.h> #include <asm/uaccess.h> extern const struct reiserfs_key MIN_KEY; static int reiserfs_readdir(struct file *, void *, filldir_t); static int reiserfs_dir_fsync(struct file *filp, int datasync); const struct file_operations reiserfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = reiserfs_readdir, .fsync = reiserfs_dir_fsync, .unlocked_ioctl = reiserfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = reiserfs_compat_ioctl, #endif }; static int reiserfs_dir_fsync(struct file *filp, int datasync) { struct inode *inode = filp->f_mapping->host; int err; reiserfs_write_lock(inode->i_sb); err = reiserfs_commit_for_inode(inode); reiserfs_write_unlock(inode->i_sb); if (err < 0) return err; return 0; } #define store_ih(where,what) copy_item_head (where, what) static inline bool is_privroot_deh(struct dentry *dir, struct reiserfs_de_head *deh) { struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; return (dir == dir->d_parent && privroot->d_inode && deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); } int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent, filldir_t filldir, loff_t *pos) { struct inode *inode = dentry->d_inode; struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */ INITIALIZE_PATH(path_to_entry); struct buffer_head *bh; int item_num, entry_num; const struct reiserfs_key *rkey; struct item_head *ih, tmp_ih; int search_res; char *local_buf; loff_t next_pos; char small_buf[32]; /* avoid kmalloc if we can */ struct reiserfs_dir_entry de; int ret = 0; reiserfs_write_lock(inode->i_sb); reiserfs_check_lock_depth(inode->i_sb, "readdir"); /* form key for search the next directory entry using f_pos field of 
file structure */ make_cpu_key(&pos_key, inode, *pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3); next_pos = cpu_key_k_offset(&pos_key); path_to_entry.reada = PATH_READA; while (1) { research: /* search the directory item, containing entry with specified key */ search_res = search_by_entry_key(inode->i_sb, &pos_key, &path_to_entry, &de); if (search_res == IO_ERROR) { // FIXME: we could just skip part of directory which could // not be read ret = -EIO; goto out; } entry_num = de.de_entry_num; bh = de.de_bh; item_num = de.de_item_num; ih = de.de_ih; store_ih(&tmp_ih, ih); /* we must have found item, that is item of this directory, */ RFALSE(COMP_SHORT_KEYS(&(ih->ih_key), &pos_key), "vs-9000: found item %h does not match to dir we readdir %K", ih, &pos_key); RFALSE(item_num > B_NR_ITEMS(bh) - 1, "vs-9005 item_num == %d, item amount == %d", item_num, B_NR_ITEMS(bh)); /* and entry must be not more than number of entries in the item */ RFALSE(I_ENTRY_COUNT(ih) < entry_num, "vs-9010: entry number is too big %d (%d)", entry_num, I_ENTRY_COUNT(ih)); if (search_res == POSITION_FOUND || entry_num < I_ENTRY_COUNT(ih)) { /* go through all entries in the directory item beginning from the entry, that has been found */ struct reiserfs_de_head *deh = B_I_DEH(bh, ih) + entry_num; for (; entry_num < I_ENTRY_COUNT(ih); entry_num++, deh++) { int d_reclen; char *d_name; off_t d_off; ino_t d_ino; if (!de_visible(deh)) /* it is hidden entry */ continue; d_reclen = entry_length(bh, ih, entry_num); d_name = B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh); if (d_reclen <= 0 || d_name + d_reclen > bh->b_data + bh->b_size) { /* There is corrupted data in entry, * We'd better stop here */ pathrelse(&path_to_entry); ret = -EIO; goto out; } if (!d_name[d_reclen - 1]) d_reclen = strlen(d_name); if (d_reclen > REISERFS_MAX_NAME(inode->i_sb-> s_blocksize)) { /* too big to send back to VFS */ continue; } /* Ignore the .reiserfs_priv entry */ if (is_privroot_deh(dentry, deh)) continue; d_off = deh_offset(deh); *pos = 
d_off; d_ino = deh_objectid(deh); if (d_reclen <= 32) { local_buf = small_buf; } else { local_buf = kmalloc(d_reclen, GFP_NOFS); if (!local_buf) { pathrelse(&path_to_entry); ret = -ENOMEM; goto out; } if (item_moved(&tmp_ih, &path_to_entry)) { kfree(local_buf); goto research; } } // Note, that we copy name to user space via temporary // buffer (local_buf) because filldir will block if // user space buffer is swapped out. At that time // entry can move to somewhere else memcpy(local_buf, d_name, d_reclen); /* * Since filldir might sleep, we can release * the write lock here for other waiters */ reiserfs_write_unlock(inode->i_sb); if (filldir (dirent, local_buf, d_reclen, d_off, d_ino, DT_UNKNOWN) < 0) { reiserfs_write_lock(inode->i_sb); if (local_buf != small_buf) { kfree(local_buf); } goto end; } reiserfs_write_lock(inode->i_sb); if (local_buf != small_buf) { kfree(local_buf); } // next entry should be looked for with such offset next_pos = deh_offset(deh) + 1; if (item_moved(&tmp_ih, &path_to_entry)) { goto research; } } /* for */ } if (item_num != B_NR_ITEMS(bh) - 1) // end of directory has been reached goto end; /* item we went through is last item of node. 
Using right delimiting key check is it directory end */ rkey = get_rkey(&path_to_entry, inode->i_sb); if (!comp_le_keys(rkey, &MIN_KEY)) { /* set pos_key to key, that is the smallest and greater that key of the last entry in the item */ set_cpu_key_k_offset(&pos_key, next_pos); continue; } if (COMP_SHORT_KEYS(rkey, &pos_key)) { // end of directory has been reached goto end; } /* directory continues in the right neighboring block */ set_cpu_key_k_offset(&pos_key, le_key_k_offset(KEY_FORMAT_3_5, rkey)); } /* while */ end: *pos = next_pos; pathrelse(&path_to_entry); reiserfs_check_path(&path_to_entry); out: reiserfs_write_unlock(inode->i_sb); return ret; } static int reiserfs_readdir(struct file *file, void *dirent, filldir_t filldir) { struct dentry *dentry = file->f_path.dentry; return reiserfs_readdir_dentry(dentry, dirent, filldir, &file->f_pos); } /* compose directory item containing "." and ".." entries (entries are not aligned to 4 byte boundary) */ /* the last four params are LE */ void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid, __le32 par_dirid, __le32 par_objid) { struct reiserfs_de_head *deh; memset(body, 0, EMPTY_DIR_SIZE_V1); deh = (struct reiserfs_de_head *)body; /* direntry header of "." */ put_deh_offset(&(deh[0]), DOT_OFFSET); /* these two are from make_le_item_head, and are are LE */ deh[0].deh_dir_id = dirid; deh[0].deh_objectid = objid; deh[0].deh_state = 0; /* Endian safe if 0 */ put_deh_location(&(deh[0]), EMPTY_DIR_SIZE_V1 - strlen(".")); mark_de_visible(&(deh[0])); /* direntry header of ".." */ put_deh_offset(&(deh[1]), DOT_DOT_OFFSET); /* key of ".." for the root directory */ /* these two are from the inode, and are are LE */ deh[1].deh_dir_id = par_dirid; deh[1].deh_objectid = par_objid; deh[1].deh_state = 0; /* Endian safe if 0 */ put_deh_location(&(deh[1]), deh_location(&(deh[0])) - strlen("..")); mark_de_visible(&(deh[1])); /* copy ".." and "." 
*/ memcpy(body + deh_location(&(deh[0])), ".", 1); memcpy(body + deh_location(&(deh[1])), "..", 2); } /* compose directory item containing "." and ".." entries */ void make_empty_dir_item(char *body, __le32 dirid, __le32 objid, __le32 par_dirid, __le32 par_objid) { struct reiserfs_de_head *deh; memset(body, 0, EMPTY_DIR_SIZE); deh = (struct reiserfs_de_head *)body; /* direntry header of "." */ put_deh_offset(&(deh[0]), DOT_OFFSET); /* these two are from make_le_item_head, and are are LE */ deh[0].deh_dir_id = dirid; deh[0].deh_objectid = objid; deh[0].deh_state = 0; /* Endian safe if 0 */ put_deh_location(&(deh[0]), EMPTY_DIR_SIZE - ROUND_UP(strlen("."))); mark_de_visible(&(deh[0])); /* direntry header of ".." */ put_deh_offset(&(deh[1]), DOT_DOT_OFFSET); /* key of ".." for the root directory */ /* these two are from the inode, and are are LE */ deh[1].deh_dir_id = par_dirid; deh[1].deh_objectid = par_objid; deh[1].deh_state = 0; /* Endian safe if 0 */ put_deh_location(&(deh[1]), deh_location(&(deh[0])) - ROUND_UP(strlen(".."))); mark_de_visible(&(deh[1])); /* copy ".." and "." */ memcpy(body + deh_location(&(deh[0])), ".", 1); memcpy(body + deh_location(&(deh[1])), "..", 2); }
gpl-2.0
KryptonOmni/android_kernel_lge_mako
drivers/media/video/m5mols/m5mols_capture.c
4738
4323
/* * The Capture code for Fujitsu M-5MOLS ISP * * Copyright (C) 2011 Samsung Electronics Co., Ltd. * Author: HeungJun Kim <riverful.kim@samsung.com> * * Copyright (C) 2009 Samsung Electronics Co., Ltd. * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/m5mols.h> #include <media/s5p_fimc.h> #include "m5mols.h" #include "m5mols_reg.h" /** * m5mols_read_rational - I2C read of a rational number * * Read numerator and denominator from registers @addr_num and @addr_den * respectively and return the division result in @val. */ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num, u32 addr_den, u32 *val) { u32 num, den; int ret = m5mols_read_u32(sd, addr_num, &num); if (!ret) ret = m5mols_read_u32(sd, addr_den, &den); if (ret) return ret; *val = den == 0 ? 0 : num / den; return ret; } /** * m5mols_capture_info - Gather captured image information * * For now it gathers only EXIF information and file size. 
*/ static int m5mols_capture_info(struct m5mols_info *info) { struct m5mols_exif *exif = &info->cap.exif; struct v4l2_subdev *sd = &info->sd; int ret; ret = m5mols_read_rational(sd, EXIF_INFO_EXPTIME_NU, EXIF_INFO_EXPTIME_DE, &exif->exposure_time); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_TV_NU, EXIF_INFO_TV_DE, &exif->shutter_speed); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_AV_NU, EXIF_INFO_AV_DE, &exif->aperture); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_BV_NU, EXIF_INFO_BV_DE, &exif->brightness); if (ret) return ret; ret = m5mols_read_rational(sd, EXIF_INFO_EBV_NU, EXIF_INFO_EBV_DE, &exif->exposure_bias); if (ret) return ret; ret = m5mols_read_u16(sd, EXIF_INFO_ISO, &exif->iso_speed); if (!ret) ret = m5mols_read_u16(sd, EXIF_INFO_FLASH, &exif->flash); if (!ret) ret = m5mols_read_u16(sd, EXIF_INFO_SDR, &exif->sdr); if (!ret) ret = m5mols_read_u16(sd, EXIF_INFO_QVAL, &exif->qval); if (ret) return ret; if (!ret) ret = m5mols_read_u32(sd, CAPC_IMAGE_SIZE, &info->cap.main); if (!ret) ret = m5mols_read_u32(sd, CAPC_THUMB_SIZE, &info->cap.thumb); if (!ret) info->cap.total = info->cap.main + info->cap.thumb; return ret; } int m5mols_start_capture(struct m5mols_info *info) { struct v4l2_subdev *sd = &info->sd; u8 resolution = info->resolution; int ret; /* * Synchronize the controls, set the capture frame resolution and color * format. The frame capture is initiated during switching from Monitor * to Capture mode. 
*/ ret = m5mols_mode(info, REG_MONITOR); if (!ret) ret = m5mols_restore_controls(info); if (!ret) ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG); if (!ret) ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution); if (!ret) ret = m5mols_lock_3a(info, true); if (!ret) ret = m5mols_mode(info, REG_CAPTURE); if (!ret) /* Wait until a frame is captured to ISP internal memory */ ret = m5mols_wait_interrupt(sd, REG_INT_CAPTURE, 2000); if (!ret) ret = m5mols_lock_3a(info, false); if (ret) return ret; /* * Initiate the captured data transfer to a MIPI-CSI receiver. */ ret = m5mols_write(sd, CAPC_SEL_FRAME, 1); if (!ret) ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN); if (!ret) { bool captured = false; unsigned int size; /* Wait for the capture completion interrupt */ ret = m5mols_wait_interrupt(sd, REG_INT_CAPTURE, 2000); if (!ret) { captured = true; ret = m5mols_capture_info(info); } size = captured ? info->cap.main : 0; v4l2_dbg(1, m5mols_debug, sd, "%s: size: %d, thumb.: %d B\n", __func__, size, info->cap.thumb); v4l2_subdev_notify(sd, S5P_FIMC_TX_END_NOTIFY, &size); } return ret; }
gpl-2.0
DennisBold/CodeAurora-MSM-Kernel
arch/arm/mach-kirkwood/board-dreamplug.c
4738
3499
/* * Copyright 2012 (C), Jason Cooper <jason@lakedaemon.net> * * arch/arm/mach-kirkwood/board-dreamplug.c * * Marvell DreamPlug Reference Board Init for drivers not converted to * flattened device tree yet. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/partitions.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_fdt.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/mtd/physmap.h> #include <linux/spi/flash.h> #include <linux/spi/spi.h> #include <linux/spi/orion_spi.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/kirkwood.h> #include <mach/bridge-regs.h> #include <plat/mvsdio.h> #include "common.h" #include "mpp.h" struct mtd_partition dreamplug_partitions[] = { { .name = "u-boot", .size = SZ_512K, .offset = 0, }, { .name = "u-boot env", .size = SZ_64K, .offset = SZ_512K + SZ_512K, }, { .name = "dtb", .size = SZ_64K, .offset = SZ_512K + SZ_512K + SZ_512K, }, }; static const struct flash_platform_data dreamplug_spi_slave_data = { .type = "mx25l1606e", .name = "spi_flash", .parts = dreamplug_partitions, .nr_parts = ARRAY_SIZE(dreamplug_partitions), }; static struct spi_board_info __initdata dreamplug_spi_slave_info[] = { { .modalias = "m25p80", .platform_data = &dreamplug_spi_slave_data, .irq = -1, .max_speed_hz = 50000000, .bus_num = 0, .chip_select = 0, }, }; static struct mv643xx_eth_platform_data dreamplug_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(0), }; static struct mv643xx_eth_platform_data dreamplug_ge01_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(1), }; static struct mv_sata_platform_data 
dreamplug_sata_data = { .n_ports = 1, }; static struct mvsdio_platform_data dreamplug_mvsdio_data = { /* unfortunately the CD signal has not been connected */ }; static struct gpio_led dreamplug_led_pins[] = { { .name = "dreamplug:blue:bluetooth", .gpio = 47, .active_low = 1, }, { .name = "dreamplug:green:wifi", .gpio = 48, .active_low = 1, }, { .name = "dreamplug:green:wifi_ap", .gpio = 49, .active_low = 1, }, }; static struct gpio_led_platform_data dreamplug_led_data = { .leds = dreamplug_led_pins, .num_leds = ARRAY_SIZE(dreamplug_led_pins), }; static struct platform_device dreamplug_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &dreamplug_led_data, } }; static unsigned int dreamplug_mpp_config[] __initdata = { MPP0_SPI_SCn, MPP1_SPI_MOSI, MPP2_SPI_SCK, MPP3_SPI_MISO, MPP47_GPIO, /* Bluetooth LED */ MPP48_GPIO, /* Wifi LED */ MPP49_GPIO, /* Wifi AP LED */ 0 }; void __init dreamplug_init(void) { /* * Basic setup. Needs to be called early. */ kirkwood_mpp_conf(dreamplug_mpp_config); spi_register_board_info(dreamplug_spi_slave_info, ARRAY_SIZE(dreamplug_spi_slave_info)); kirkwood_spi_init(); kirkwood_ehci_init(); kirkwood_ge00_init(&dreamplug_ge00_data); kirkwood_ge01_init(&dreamplug_ge01_data); kirkwood_sata_init(&dreamplug_sata_data); kirkwood_sdio_init(&dreamplug_mvsdio_data); platform_device_register(&dreamplug_leds); }
gpl-2.0
ShinyROM/android_kernel_asus_flo
drivers/video/pmag-ba-fb.c
4994
6676
/* * linux/drivers/video/pmag-ba-fb.c * * PMAG-BA TURBOchannel Color Frame Buffer (CFB) card support, * derived from: * "HP300 Topcat framebuffer support (derived from macfb of all things) * Phil Blundell <philb@gnu.org> 1998", the original code can be * found in the file hpfb.c in the same directory. * * Based on digital document: * "PMAG-BA TURBOchannel Color Frame Buffer * Functional Specification", Revision 1.2, August 27, 1990 * * DECstation related code Copyright (C) 1999, 2000, 2001 by * Michael Engel <engel@unix-ag.org>, * Karsten Merker <merker@linuxtag.org> and * Harald Koerfgen. * Copyright (c) 2005, 2006 Maciej W. Rozycki * Copyright (c) 2005 James Simmons * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. */ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/tc.h> #include <linux/types.h> #include <asm/io.h> #include <video/pmag-ba-fb.h> struct pmagbafb_par { volatile void __iomem *mmio; volatile u32 __iomem *dac; }; static struct fb_var_screeninfo pmagbafb_defined __devinitdata = { .xres = 1024, .yres = 864, .xres_virtual = 1024, .yres_virtual = 864, .bits_per_pixel = 8, .red.length = 8, .green.length = 8, .blue.length = 8, .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .accel_flags = FB_ACCEL_NONE, .pixclock = 14452, .left_margin = 116, .right_margin = 12, .upper_margin = 34, .lower_margin = 12, .hsync_len = 128, .vsync_len = 3, .sync = FB_SYNC_ON_GREEN, .vmode = FB_VMODE_NONINTERLACED, }; static struct fb_fix_screeninfo pmagbafb_fix __devinitdata = { .id = "PMAG-BA", .smem_len = (1024 * 1024), .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .line_length = 1024, .mmio_len = PMAG_BA_SIZE - PMAG_BA_BT459, }; static inline void dac_write(struct pmagbafb_par *par, unsigned int reg, u8 v) { writeb(v, 
par->dac + reg / 4); } static inline u8 dac_read(struct pmagbafb_par *par, unsigned int reg) { return readb(par->dac + reg / 4); } /* * Set the palette. */ static int pmagbafb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info) { struct pmagbafb_par *par = info->par; if (regno >= info->cmap.len) return 1; red >>= 8; /* The cmap fields are 16 bits */ green >>= 8; /* wide, but the hardware colormap */ blue >>= 8; /* registers are only 8 bits wide */ mb(); dac_write(par, BT459_ADDR_LO, regno); dac_write(par, BT459_ADDR_HI, 0x00); wmb(); dac_write(par, BT459_CMAP, red); wmb(); dac_write(par, BT459_CMAP, green); wmb(); dac_write(par, BT459_CMAP, blue); return 0; } static struct fb_ops pmagbafb_ops = { .owner = THIS_MODULE, .fb_setcolreg = pmagbafb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Turn the hardware cursor off. */ static void __init pmagbafb_erase_cursor(struct fb_info *info) { struct pmagbafb_par *par = info->par; mb(); dac_write(par, BT459_ADDR_LO, 0x00); dac_write(par, BT459_ADDR_HI, 0x03); wmb(); dac_write(par, BT459_DATA, 0x00); } static int __devinit pmagbafb_probe(struct device *dev) { struct tc_dev *tdev = to_tc_dev(dev); resource_size_t start, len; struct fb_info *info; struct pmagbafb_par *par; int err; info = framebuffer_alloc(sizeof(struct pmagbafb_par), dev); if (!info) { printk(KERN_ERR "%s: Cannot allocate memory\n", dev_name(dev)); return -ENOMEM; } par = info->par; dev_set_drvdata(dev, info); if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { printk(KERN_ERR "%s: Cannot allocate color map\n", dev_name(dev)); err = -ENOMEM; goto err_alloc; } info->fbops = &pmagbafb_ops; info->fix = pmagbafb_fix; info->var = pmagbafb_defined; info->flags = FBINFO_DEFAULT; /* Request the I/O MEM resource. 
*/ start = tdev->resource.start; len = tdev->resource.end - start + 1; if (!request_mem_region(start, len, dev_name(dev))) { printk(KERN_ERR "%s: Cannot reserve FB region\n", dev_name(dev)); err = -EBUSY; goto err_cmap; } /* MMIO mapping setup. */ info->fix.mmio_start = start; par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); if (!par->mmio) { printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev)); err = -ENOMEM; goto err_resource; } par->dac = par->mmio + PMAG_BA_BT459; /* Frame buffer mapping setup. */ info->fix.smem_start = start + PMAG_BA_FBMEM; info->screen_base = ioremap_nocache(info->fix.smem_start, info->fix.smem_len); if (!info->screen_base) { printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev)); err = -ENOMEM; goto err_mmio_map; } info->screen_size = info->fix.smem_len; pmagbafb_erase_cursor(info); err = register_framebuffer(info); if (err < 0) { printk(KERN_ERR "%s: Cannot register framebuffer\n", dev_name(dev)); goto err_smem_map; } get_device(dev); pr_info("fb%d: %s frame buffer device at %s\n", info->node, info->fix.id, dev_name(dev)); return 0; err_smem_map: iounmap(info->screen_base); err_mmio_map: iounmap(par->mmio); err_resource: release_mem_region(start, len); err_cmap: fb_dealloc_cmap(&info->cmap); err_alloc: framebuffer_release(info); return err; } static int __exit pmagbafb_remove(struct device *dev) { struct tc_dev *tdev = to_tc_dev(dev); struct fb_info *info = dev_get_drvdata(dev); struct pmagbafb_par *par = info->par; resource_size_t start, len; put_device(dev); unregister_framebuffer(info); iounmap(info->screen_base); iounmap(par->mmio); start = tdev->resource.start; len = tdev->resource.end - start + 1; release_mem_region(start, len); fb_dealloc_cmap(&info->cmap); framebuffer_release(info); return 0; } /* * Initialize the framebuffer. 
*/ static const struct tc_device_id pmagbafb_tc_table[] = { { "DEC ", "PMAG-BA " }, { } }; MODULE_DEVICE_TABLE(tc, pmagbafb_tc_table); static struct tc_driver pmagbafb_driver = { .id_table = pmagbafb_tc_table, .driver = { .name = "pmagbafb", .bus = &tc_bus_type, .probe = pmagbafb_probe, .remove = __exit_p(pmagbafb_remove), }, }; static int __init pmagbafb_init(void) { #ifndef MODULE if (fb_get_options("pmagbafb", NULL)) return -ENXIO; #endif return tc_register_driver(&pmagbafb_driver); } static void __exit pmagbafb_exit(void) { tc_unregister_driver(&pmagbafb_driver); } module_init(pmagbafb_init); module_exit(pmagbafb_exit); MODULE_LICENSE("GPL");
gpl-2.0
arrrghhh/android_kernel_samsung_mondrianwifi
drivers/video/pmagb-b-fb.c
4994
10464
/* * linux/drivers/video/pmagb-b-fb.c * * PMAGB-B TURBOchannel Smart Frame Buffer (SFB) card support, * derived from: * "HP300 Topcat framebuffer support (derived from macfb of all things) * Phil Blundell <philb@gnu.org> 1998", the original code can be * found in the file hpfb.c in the same directory. * * DECstation related code Copyright (C) 1999, 2000, 2001 by * Michael Engel <engel@unix-ag.org>, * Karsten Merker <merker@linuxtag.org> and * Harald Koerfgen. * Copyright (c) 2005, 2006 Maciej W. Rozycki * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. */ #include <linux/compiler.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/tc.h> #include <linux/types.h> #include <asm/io.h> #include <video/pmagb-b-fb.h> struct pmagbbfb_par { volatile void __iomem *mmio; volatile void __iomem *smem; volatile u32 __iomem *sfb; volatile u32 __iomem *dac; unsigned int osc0; unsigned int osc1; int slot; }; static struct fb_var_screeninfo pmagbbfb_defined __devinitdata = { .bits_per_pixel = 8, .red.length = 8, .green.length = 8, .blue.length = 8, .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .accel_flags = FB_ACCEL_NONE, .sync = FB_SYNC_ON_GREEN, .vmode = FB_VMODE_NONINTERLACED, }; static struct fb_fix_screeninfo pmagbbfb_fix __devinitdata = { .id = "PMAGB-BA", .smem_len = (2048 * 1024), .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .mmio_len = PMAGB_B_FBMEM, }; static inline void sfb_write(struct pmagbbfb_par *par, unsigned int reg, u32 v) { writel(v, par->sfb + reg / 4); } static inline u32 sfb_read(struct pmagbbfb_par *par, unsigned int reg) { return readl(par->sfb + reg / 4); } static inline void dac_write(struct pmagbbfb_par *par, unsigned int reg, u8 v) { writeb(v, par->dac + reg / 4); } static inline u8 
dac_read(struct pmagbbfb_par *par, unsigned int reg) { return readb(par->dac + reg / 4); } static inline void gp0_write(struct pmagbbfb_par *par, u32 v) { writel(v, par->mmio + PMAGB_B_GP0); } /* * Set the palette. */ static int pmagbbfb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info) { struct pmagbbfb_par *par = info->par; if (regno >= info->cmap.len) return 1; red >>= 8; /* The cmap fields are 16 bits */ green >>= 8; /* wide, but the hardware colormap */ blue >>= 8; /* registers are only 8 bits wide */ mb(); dac_write(par, BT459_ADDR_LO, regno); dac_write(par, BT459_ADDR_HI, 0x00); wmb(); dac_write(par, BT459_CMAP, red); wmb(); dac_write(par, BT459_CMAP, green); wmb(); dac_write(par, BT459_CMAP, blue); return 0; } static struct fb_ops pmagbbfb_ops = { .owner = THIS_MODULE, .fb_setcolreg = pmagbbfb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Turn the hardware cursor off. */ static void __init pmagbbfb_erase_cursor(struct fb_info *info) { struct pmagbbfb_par *par = info->par; mb(); dac_write(par, BT459_ADDR_LO, 0x00); dac_write(par, BT459_ADDR_HI, 0x03); wmb(); dac_write(par, BT459_DATA, 0x00); } /* * Set up screen parameters. 
 */
static void __devinit pmagbbfb_screen_setup(struct fb_info *info)
{
	struct pmagbbfb_par *par = info->par;

	/* Derive the mode from the hardware's video timing registers.
	 * All horizontal quantities are stored in units of 4 pixels,
	 * hence the "* 4" on the HOR-derived fields. */
	info->var.xres = ((sfb_read(par, SFB_REG_VID_HOR) >>
			   SFB_VID_HOR_PIX_SHIFT) & SFB_VID_HOR_PIX_MASK) * 4;
	info->var.xres_virtual = info->var.xres;
	info->var.yres = (sfb_read(par, SFB_REG_VID_VER) >>
			  SFB_VID_VER_SL_SHIFT) & SFB_VID_VER_SL_MASK;
	info->var.yres_virtual = info->var.yres;
	info->var.left_margin = ((sfb_read(par, SFB_REG_VID_HOR) >>
				  SFB_VID_HOR_BP_SHIFT) &
				 SFB_VID_HOR_BP_MASK) * 4;
	info->var.right_margin = ((sfb_read(par, SFB_REG_VID_HOR) >>
				   SFB_VID_HOR_FP_SHIFT) &
				  SFB_VID_HOR_FP_MASK) * 4;
	info->var.upper_margin = (sfb_read(par, SFB_REG_VID_VER) >>
				  SFB_VID_VER_BP_SHIFT) & SFB_VID_VER_BP_MASK;
	info->var.lower_margin = (sfb_read(par, SFB_REG_VID_VER) >>
				  SFB_VID_VER_FP_SHIFT) & SFB_VID_VER_FP_MASK;
	info->var.hsync_len = ((sfb_read(par, SFB_REG_VID_HOR) >>
				SFB_VID_HOR_SYN_SHIFT) &
			       SFB_VID_HOR_SYN_MASK) * 4;
	info->var.vsync_len = (sfb_read(par, SFB_REG_VID_VER) >>
			       SFB_VID_VER_SYN_SHIFT) & SFB_VID_VER_SYN_MASK;

	/* 8bpp: one byte per pixel, so line length equals xres. */
	info->fix.line_length = info->var.xres;
};

/*
 * Determine oscillator configuration.
 */
static void __devinit pmagbbfb_osc_setup(struct fb_info *info)
{
	/* Candidate pixel-clock frequencies, in kHz, descending. */
	static unsigned int pmagbbfb_freqs[] __devinitdata = {
		130808, 119843, 104000, 92980, 74370, 72800,
		69197, 66000, 65000, 50350, 36000, 32000, 25175
	};
	struct pmagbbfb_par *par = info->par;
	struct tc_bus *tbus = to_tc_dev(info->device)->bus;
	u32 count0 = 8, count1 = 8, counttc = 16 * 256 + 8;
	u32 freq0, freq1, freqtc = tc_get_speed(tbus) / 250;
	int i, j;

	/* Measure each oscillator by counting video clocks over a fixed
	 * number of TC clock periods; 16 iterations are accumulated. */
	gp0_write(par, 0);				/* select Osc0 */
	for (j = 0; j < 16; j++) {
		mb();
		sfb_write(par, SFB_REG_TCCLK_COUNT, 0);
		mb();
		for (i = 0; i < 100; i++) {	/* nominally max. 20.5us */
			if (sfb_read(par, SFB_REG_TCCLK_COUNT) == 0)
				break;
			udelay(1);
		}
		count0 += sfb_read(par, SFB_REG_VIDCLK_COUNT);
	}

	gp0_write(par, 1);				/* select Osc1 */
	for (j = 0; j < 16; j++) {
		mb();
		sfb_write(par, SFB_REG_TCCLK_COUNT, 0);
		for (i = 0; i < 100; i++) {	/* nominally max. 20.5us */
			if (sfb_read(par, SFB_REG_TCCLK_COUNT) == 0)
				break;
			udelay(1);
		}
		count1 += sfb_read(par, SFB_REG_VIDCLK_COUNT);
	}

	/* Osc0: derive from the known TC bus speed; snap to the nominal
	 * first table entry when within ~1/64 tolerance. */
	freq0 = (freqtc * count0 + counttc / 2) / counttc;
	par->osc0 = freq0;
	if (freq0 >= pmagbbfb_freqs[0] - (pmagbbfb_freqs[0] + 32) / 64 &&
	    freq0 <= pmagbbfb_freqs[0] + (pmagbbfb_freqs[0] + 32) / 64)
		par->osc0 = pmagbbfb_freqs[0];

	/* Osc1: measured relative to Osc0; snap to the closest table
	 * entry within ~1/256 tolerance. */
	freq1 = (par->osc0 * count1 + count0 / 2) / count0;
	par->osc1 = freq1;
	for (i = 0; i < ARRAY_SIZE(pmagbbfb_freqs); i++)
		if (freq1 >= pmagbbfb_freqs[i] -
			     (pmagbbfb_freqs[i] + 128) / 256 &&
		    freq1 <= pmagbbfb_freqs[i] +
			     (pmagbbfb_freqs[i] + 128) / 256) {
			par->osc1 = pmagbbfb_freqs[i];
			break;
		}

	/* If the two oscillators are (nearly) identical, disable Osc1.
	 * NOTE(review): if osc0/osc1 are unsigned, one of these two
	 * subtractions wraps whenever they differ — presumably harmless
	 * because the other test then decides, but confirm the intent. */
	if (par->osc0 - par->osc1 <= (par->osc0 + par->osc1 + 256) / 512 ||
	    par->osc1 - par->osc0 <= (par->osc0 + par->osc1 + 256) / 512)
		par->osc1 = 0;

	gp0_write(par, par->osc1 != 0);		/* reselect OscX */

	/* pixclock is in picoseconds per pixel: 1e9 / f[kHz], rounded. */
	info->var.pixclock = par->osc1 ?
			     (1000000000 + par->osc1 / 2) / par->osc1 :
			     (1000000000 + par->osc0 / 2) / par->osc0;
};

/*
 * Probe one PMAGB-B board: allocate the fb_info, map MMIO and frame
 * buffer apertures, read back the video mode and clocks, and register
 * with the fbdev core.  Returns 0 or a negative errno; unwinds every
 * acquired resource on failure via the goto chain.
 */
static int __devinit pmagbbfb_probe(struct device *dev)
{
	struct tc_dev *tdev = to_tc_dev(dev);
	resource_size_t start, len;
	struct fb_info *info;
	struct pmagbbfb_par *par;
	char freq0[12], freq1[12];
	u32 vid_base;
	int err;

	info = framebuffer_alloc(sizeof(struct pmagbbfb_par), dev);
	if (!info) {
		printk(KERN_ERR "%s: Cannot allocate memory\n",
		       dev_name(dev));
		return -ENOMEM;
	}
	par = info->par;
	dev_set_drvdata(dev, info);

	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
		printk(KERN_ERR "%s: Cannot allocate color map\n",
		       dev_name(dev));
		err = -ENOMEM;
		goto err_alloc;
	}

	info->fbops = &pmagbbfb_ops;
	info->fix = pmagbbfb_fix;
	info->var = pmagbbfb_defined;
	info->flags = FBINFO_DEFAULT;

	/* Request the I/O MEM resource. */
	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	if (!request_mem_region(start, len, dev_name(dev))) {
		printk(KERN_ERR "%s: Cannot reserve FB region\n",
		       dev_name(dev));
		err = -EBUSY;
		goto err_cmap;
	}

	/* MMIO mapping setup. */
	info->fix.mmio_start = start;
	par->mmio = ioremap_nocache(info->fix.mmio_start,
				    info->fix.mmio_len);
	if (!par->mmio) {
		printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
		err = -ENOMEM;
		goto err_resource;
	}
	par->sfb = par->mmio + PMAGB_B_SFB;
	par->dac = par->mmio + PMAGB_B_BT459;

	/* Frame buffer mapping setup. */
	info->fix.smem_start = start + PMAGB_B_FBMEM;
	par->smem = ioremap_nocache(info->fix.smem_start,
				    info->fix.smem_len);
	if (!par->smem) {
		printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
		err = -ENOMEM;
		goto err_mmio_map;
	}
	/* The visible screen starts vid_base 4KiB pages into the
	 * aperture; the usable size shrinks by that offset at both
	 * ends (hence "2 *"). */
	vid_base = sfb_read(par, SFB_REG_VID_BASE);
	info->screen_base = (void __iomem *)par->smem + vid_base * 0x1000;
	info->screen_size = info->fix.smem_len - 2 * vid_base * 0x1000;

	pmagbbfb_erase_cursor(info);
	pmagbbfb_screen_setup(info);
	pmagbbfb_osc_setup(info);

	err = register_framebuffer(info);
	if (err < 0) {
		printk(KERN_ERR "%s: Cannot register framebuffer\n",
		       dev_name(dev));
		goto err_smem_map;
	}

	get_device(dev);

	snprintf(freq0, sizeof(freq0), "%u.%03uMHz",
		 par->osc0 / 1000, par->osc0 % 1000);
	snprintf(freq1, sizeof(freq1), "%u.%03uMHz",
		 par->osc1 / 1000, par->osc1 % 1000);

	pr_info("fb%d: %s frame buffer device at %s\n",
		info->node, info->fix.id, dev_name(dev));
	pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n",
		info->node, freq0, par->osc1 ? freq1 : "disabled",
		par->osc1 != 0);

	return 0;

err_smem_map:
	iounmap(par->smem);

err_mmio_map:
	iounmap(par->mmio);

err_resource:
	release_mem_region(start, len);

err_cmap:
	fb_dealloc_cmap(&info->cmap);

err_alloc:
	framebuffer_release(info);
	return err;
}

/* Tear down everything acquired by pmagbbfb_probe(), in reverse order. */
static int __exit pmagbbfb_remove(struct device *dev)
{
	struct tc_dev *tdev = to_tc_dev(dev);
	struct fb_info *info = dev_get_drvdata(dev);
	struct pmagbbfb_par *par = info->par;
	resource_size_t start, len;

	put_device(dev);
	unregister_framebuffer(info);
	iounmap(par->smem);
	iounmap(par->mmio);
	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	release_mem_region(start, len);
	fb_dealloc_cmap(&info->cmap);
	framebuffer_release(info);
	return 0;
}

/*
 * Initialize the framebuffer.
 */
static const struct tc_device_id pmagbbfb_tc_table[] = {
	{ "DEC ", "PMAGB-BA" },
	{ }
};
MODULE_DEVICE_TABLE(tc, pmagbbfb_tc_table);

static struct tc_driver pmagbbfb_driver = {
	.id_table = pmagbbfb_tc_table,
	.driver = {
		.name = "pmagbbfb",
		.bus = &tc_bus_type,
		.probe = pmagbbfb_probe,
		.remove = __exit_p(pmagbbfb_remove),
	},
};

static int __init pmagbbfb_init(void)
{
#ifndef MODULE
	/* Respect "video=" boot options; bail out if we were disabled. */
	if (fb_get_options("pmagbbfb", NULL))
		return -ENXIO;
#endif
	return tc_register_driver(&pmagbbfb_driver);
}

static void __exit pmagbbfb_exit(void)
{
	tc_unregister_driver(&pmagbbfb_driver);
}

module_init(pmagbbfb_init);
module_exit(pmagbbfb_exit);

MODULE_LICENSE("GPL");
gpl-2.0
TeamWin/android_kernel_lge_msm8974
drivers/hwmon/pmbus/max8688.c
8834
5501
/* * Hardware monitoring driver for Maxim MAX8688 * * Copyright (c) 2011 Ericsson AB. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/i2c.h> #include "pmbus.h" #define MAX8688_MFR_VOUT_PEAK 0xd4 #define MAX8688_MFR_IOUT_PEAK 0xd5 #define MAX8688_MFR_TEMPERATURE_PEAK 0xd6 #define MAX8688_MFG_STATUS 0xd8 #define MAX8688_STATUS_OC_FAULT (1 << 4) #define MAX8688_STATUS_OV_FAULT (1 << 5) #define MAX8688_STATUS_OV_WARNING (1 << 8) #define MAX8688_STATUS_UV_FAULT (1 << 9) #define MAX8688_STATUS_UV_WARNING (1 << 10) #define MAX8688_STATUS_UC_FAULT (1 << 11) #define MAX8688_STATUS_OC_WARNING (1 << 12) #define MAX8688_STATUS_OT_FAULT (1 << 13) #define MAX8688_STATUS_OT_WARNING (1 << 14) static int max8688_read_word_data(struct i2c_client *client, int page, int reg) { int ret; if (page) return -ENXIO; switch (reg) { case PMBUS_VIRT_READ_VOUT_MAX: ret = pmbus_read_word_data(client, 0, MAX8688_MFR_VOUT_PEAK); break; case PMBUS_VIRT_READ_IOUT_MAX: ret = pmbus_read_word_data(client, 0, MAX8688_MFR_IOUT_PEAK); break; case PMBUS_VIRT_READ_TEMP_MAX: ret = pmbus_read_word_data(client, 0, MAX8688_MFR_TEMPERATURE_PEAK); break; case PMBUS_VIRT_RESET_VOUT_HISTORY: case PMBUS_VIRT_RESET_IOUT_HISTORY: case PMBUS_VIRT_RESET_TEMP_HISTORY: ret = 0; 
break; default: ret = -ENODATA; break; } return ret; } static int max8688_write_word_data(struct i2c_client *client, int page, int reg, u16 word) { int ret; switch (reg) { case PMBUS_VIRT_RESET_VOUT_HISTORY: ret = pmbus_write_word_data(client, 0, MAX8688_MFR_VOUT_PEAK, 0); break; case PMBUS_VIRT_RESET_IOUT_HISTORY: ret = pmbus_write_word_data(client, 0, MAX8688_MFR_IOUT_PEAK, 0); break; case PMBUS_VIRT_RESET_TEMP_HISTORY: ret = pmbus_write_word_data(client, 0, MAX8688_MFR_TEMPERATURE_PEAK, 0xffff); break; default: ret = -ENODATA; break; } return ret; } static int max8688_read_byte_data(struct i2c_client *client, int page, int reg) { int ret = 0; int mfg_status; if (page > 0) return -ENXIO; switch (reg) { case PMBUS_STATUS_VOUT: mfg_status = pmbus_read_word_data(client, 0, MAX8688_MFG_STATUS); if (mfg_status < 0) return mfg_status; if (mfg_status & MAX8688_STATUS_UV_WARNING) ret |= PB_VOLTAGE_UV_WARNING; if (mfg_status & MAX8688_STATUS_UV_FAULT) ret |= PB_VOLTAGE_UV_FAULT; if (mfg_status & MAX8688_STATUS_OV_WARNING) ret |= PB_VOLTAGE_OV_WARNING; if (mfg_status & MAX8688_STATUS_OV_FAULT) ret |= PB_VOLTAGE_OV_FAULT; break; case PMBUS_STATUS_IOUT: mfg_status = pmbus_read_word_data(client, 0, MAX8688_MFG_STATUS); if (mfg_status < 0) return mfg_status; if (mfg_status & MAX8688_STATUS_UC_FAULT) ret |= PB_IOUT_UC_FAULT; if (mfg_status & MAX8688_STATUS_OC_WARNING) ret |= PB_IOUT_OC_WARNING; if (mfg_status & MAX8688_STATUS_OC_FAULT) ret |= PB_IOUT_OC_FAULT; break; case PMBUS_STATUS_TEMPERATURE: mfg_status = pmbus_read_word_data(client, 0, MAX8688_MFG_STATUS); if (mfg_status < 0) return mfg_status; if (mfg_status & MAX8688_STATUS_OT_WARNING) ret |= PB_TEMP_OT_WARNING; if (mfg_status & MAX8688_STATUS_OT_FAULT) ret |= PB_TEMP_OT_FAULT; break; default: ret = -ENODATA; break; } return ret; } static struct pmbus_driver_info max8688_info = { .pages = 1, .format[PSC_VOLTAGE_IN] = direct, .format[PSC_VOLTAGE_OUT] = direct, .format[PSC_TEMPERATURE] = direct, .format[PSC_CURRENT_OUT] = 
direct, .m[PSC_VOLTAGE_IN] = 19995, .b[PSC_VOLTAGE_IN] = 0, .R[PSC_VOLTAGE_IN] = -1, .m[PSC_VOLTAGE_OUT] = 19995, .b[PSC_VOLTAGE_OUT] = 0, .R[PSC_VOLTAGE_OUT] = -1, .m[PSC_CURRENT_OUT] = 23109, .b[PSC_CURRENT_OUT] = 0, .R[PSC_CURRENT_OUT] = -2, .m[PSC_TEMPERATURE] = -7612, .b[PSC_TEMPERATURE] = 335, .R[PSC_TEMPERATURE] = -3, .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_TEMP, .read_byte_data = max8688_read_byte_data, .read_word_data = max8688_read_word_data, .write_word_data = max8688_write_word_data, }; static int max8688_probe(struct i2c_client *client, const struct i2c_device_id *id) { return pmbus_do_probe(client, id, &max8688_info); } static const struct i2c_device_id max8688_id[] = { {"max8688", 0}, { } }; MODULE_DEVICE_TABLE(i2c, max8688_id); /* This is the driver that will be inserted */ static struct i2c_driver max8688_driver = { .driver = { .name = "max8688", }, .probe = max8688_probe, .remove = pmbus_do_remove, .id_table = max8688_id, }; module_i2c_driver(max8688_driver); MODULE_AUTHOR("Guenter Roeck"); MODULE_DESCRIPTION("PMBus driver for Maxim MAX8688"); MODULE_LICENSE("GPL");
gpl-2.0
AndroPlus-org/android_kernel_sony_msm8974ac
net/rxrpc/rxkad.c
9090
27965
/* Kerberos-based RxRPC security
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <keys/rxrpc-type.h>
#define rxrpc_debug rxkad_debug
#include "ar-internal.h"

#define RXKAD_VERSION			2
#define MAXKRB5TICKETLEN		1024
#define RXKAD_TKT_TYPE_KERBEROS_V5	256
#define ANAME_SZ			40	/* size of authentication name */
#define INST_SZ				40	/* size of principal's instance */
#define REALM_SZ			40	/* size of principal's auth domain */
#define SNAME_SZ			40	/* size of service name */

unsigned rxrpc_debug;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "rxkad debugging mask");

/* Security header prepended to the payload at level 1 (auth). */
struct rxkad_level1_hdr {
	__be32	data_size;	/* true data size (excluding padding) */
};

/* Security header prepended to the payload at level 2 (encrypt). */
struct rxkad_level2_hdr {
	__be32	data_size;	/* true data size (excluding padding) */
	__be32	checksum;	/* decrypted data checksum */
};

MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos 4)");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

/*
 * this holds a pinned cipher so that keventd doesn't get called by the cipher
 * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
 * packets
 */
static struct crypto_blkcipher *rxkad_ci;
static DEFINE_MUTEX(rxkad_ci_mutex);

/*
 * initialise connection security
 *
 * Allocates a pcbc(fcrypt) cipher keyed with the connection's session
 * key and sets the per-packet security overhead for the negotiated
 * level.  Returns 0 or a negative errno.
 */
static int rxkad_init_connection_security(struct rxrpc_connection *conn)
{
	struct crypto_blkcipher *ci;
	struct rxrpc_key_token *token;
	int ret;

	_enter("{%d},{%x}", conn->debug_id, key_serial(conn->key));

	token = conn->key->payload.data;
	conn->security_ix = token->security_index;

	ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(ci)) {
		_debug("no cipher");
		ret = PTR_ERR(ci);
		goto error;
	}

	if (crypto_blkcipher_setkey(ci, token->kad->session_key,
				    sizeof(token->kad->session_key)) < 0)
		BUG();

	switch (conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		break;
	case RXRPC_SECURITY_AUTH:
		conn->size_align = 8;
		conn->security_size = sizeof(struct rxkad_level1_hdr);
		conn->header_size += sizeof(struct rxkad_level1_hdr);
		break;
	case RXRPC_SECURITY_ENCRYPT:
		conn->size_align = 8;
		conn->security_size = sizeof(struct rxkad_level2_hdr);
		conn->header_size += sizeof(struct rxkad_level2_hdr);
		break;
	default:
		ret = -EKEYREJECTED;
		goto error;
	}

	conn->cipher = ci;
	ret = 0;
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * prime the encryption state with the invariant parts of a connection's
 * description
 *
 * Encrypts {epoch, cid, 0, security_ix} with the session key and keeps
 * the second half of the result as the connection's checksum IV.
 */
static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
{
	struct rxrpc_key_token *token;
	struct blkcipher_desc desc;
	struct scatterlist sg[2];
	struct rxrpc_crypt iv;
	struct {
		__be32 x[4];
	} tmpbuf __attribute__((aligned(16))); /* must all be in same page */

	_enter("");

	if (!conn->key)
		return;

	token = conn->key->payload.data;
	memcpy(&iv, token->kad->session_key, sizeof(iv));

	desc.tfm = conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	tmpbuf.x[0] = conn->epoch;
	tmpbuf.x[1] = conn->cid;
	tmpbuf.x[2] = 0;
	tmpbuf.x[3] = htonl(conn->security_ix);

	/* in-place encrypt: source and destination alias tmpbuf */
	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
	ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);

	_leave("");
}

/*
 * partially encrypt a packet (level 1 security)
 *
 * Only the level-1 header and the first 4 bytes of the payload are
 * encrypted (with a zero IV); the rest travels in the clear.
 */
static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
				    struct sk_buff *skb,
				    u32 data_size,
				    void *sechdr)
{
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct {
		struct rxkad_level1_hdr hdr;
		__be32	first;	/* first four bytes of data and padding */
	} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
	u16 check;

	sp = rxrpc_skb(skb);

	_enter("");

	/* fold a seq/callNumber check value into the upper 16 bits of
	 * the encrypted size field */
	check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
	data_size |= (u32) check << 16;

	tmpbuf.hdr.data_size = htonl(data_size);
	memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first));

	/* start the encryption afresh */
	memset(&iv, 0, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));

	_leave(" = 0");
	return 0;
}

/*
 * wholly encrypt a packet (level 2 security)
 *
 * Encrypts the level-2 header and then the entire payload in place,
 * padded up to the connection's size alignment.
 */
static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
				       struct sk_buff *skb,
				       u32 data_size,
				       void *sechdr)
{
	const struct rxrpc_key_token *token;
	struct rxkad_level2_hdr rxkhdr
		__attribute__((aligned(8))); /* must be all on one page */
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[16];
	struct sk_buff *trailer;
	unsigned len;
	u16 check;
	int nsg;

	sp = rxrpc_skb(skb);

	_enter("");

	check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);

	rxkhdr.data_size = htonl(data_size | (u32) check << 16);
	rxkhdr.checksum = 0;

	/* encrypt from the session key */
	token = call->conn->key->payload.data;
	memcpy(&iv, token->kad->session_key, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
	sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));

	/* we want to encrypt the skbuff in-place */
	nsg = skb_cow_data(skb, 0, &trailer);
	if (nsg < 0 || nsg > 16)
		return -ENOMEM;

	/* round the payload length up to the alignment boundary */
	len = data_size + call->conn->size_align - 1;
	len &= ~(call->conn->size_align - 1);

	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, len);
	crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);

	_leave(" = 0");
	return 0;
}

/*
 * checksum an RxRPC packet header
 *
 * Computes the rxkad header checksum from the call number, channel and
 * sequence number (keyed via the connection's csum IV), stores it in
 * the header, then applies level-dependent payload protection.
 */
static int rxkad_secure_packet(const struct rxrpc_call *call,
			       struct sk_buff *skb,
			       size_t data_size,
			       void *sechdr)
{
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct {
		__be32 x[2];
	} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
	__be32 x;
	u32 y;
	int ret;

	sp = rxrpc_skb(skb);

	_enter("{%d{%x}},{#%u},%zu,",
	       call->debug_id, key_serial(call->conn->key),
	       ntohl(sp->hdr.seq), data_size);

	if (!call->conn->cipher)
		return 0;

	ret = key_validate(call->conn->key);
	if (ret < 0)
		return ret;

	/* continue encrypting from where we left off */
	memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* calculate the security checksum */
	x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
	x |= sp->hdr.seq & cpu_to_be32(0x3fffffff);
	tmpbuf.x[0] = sp->hdr.callNumber;
	tmpbuf.x[1] = x;

	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	y = ntohl(tmpbuf.x[1]);
	y = (y >> 16) & 0xffff;
	if (y == 0)
		y = 1; /* zero checksums are not permitted */
	sp->hdr.cksum = htons(y);

	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		ret = 0;
		break;
	case RXRPC_SECURITY_AUTH:
		ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr);
		break;
	case RXRPC_SECURITY_ENCRYPT:
		ret = rxkad_secure_packet_encrypt(call, skb, data_size,
						  sechdr);
		break;
	default:
		ret = -EPERM;
		break;
	}

	_leave(" = %d [set %hx]", ret, y);
	return ret;
}

/*
 * decrypt partial encryption on a packet (level 1 security)
 *
 * Decrypts the first 8 bytes in place, validates the embedded check
 * value and trims the padding.  Sets *_abort_code and returns -EPROTO
 * on protocol errors; -ENOMEM if the skb can't be made writable.
 */
static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
				    struct sk_buff *skb,
				    u32 *_abort_code)
{
	struct rxkad_level1_hdr sechdr;
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[16];
	struct sk_buff *trailer;
	u32 data_size, buf;
	u16 check;
	int nsg;

	_enter("");

	sp = rxrpc_skb(skb);

	/* we want to decrypt the skbuff in-place */
	nsg = skb_cow_data(skb, 0, &trailer);
	if (nsg < 0 || nsg > 16)
		goto nomem;

	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, 8);

	/* start the decryption afresh */
	memset(&iv, 0, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8);

	/* remove the decrypted packet length */
	if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
		goto datalen_error;
	if (!skb_pull(skb, sizeof(sechdr)))
		BUG();

	buf = ntohl(sechdr.data_size);
	data_size = buf & 0xffff;

	check = buf >> 16;
	check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
	check &= 0xffff;
	if (check != 0) {
		*_abort_code = RXKADSEALEDINCON;
		goto protocol_error;
	}

	/* shorten the packet to remove the padding */
	if (data_size > skb->len)
		goto datalen_error;
	else if (data_size < skb->len)
		skb->len = data_size;

	_leave(" = 0 [dlen=%x]", data_size);
	return 0;

datalen_error:
	*_abort_code = RXKADDATALEN;
protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;

nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * wholly decrypt a packet (level 2 security)
 *
 * Decrypts the entire payload in place with the session key, then
 * validates the check value and trims the padding as at level 1.
 */
static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
				       struct sk_buff *skb,
				       u32 *_abort_code)
{
	const struct rxrpc_key_token *token;
	struct rxkad_level2_hdr sechdr;
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist _sg[4], *sg;
	struct sk_buff *trailer;
	u32 data_size, buf;
	u16 check;
	int nsg;

	_enter(",{%d}", skb->len);

	sp = rxrpc_skb(skb);

	/* we want to decrypt the skbuff in-place */
	nsg = skb_cow_data(skb, 0, &trailer);
	if (nsg < 0)
		goto nomem;

	/* fall back to a heap scatterlist for heavily fragmented skbs */
	sg = _sg;
	if (unlikely(nsg > 4)) {
		sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO);
		if (!sg)
			goto nomem;
	}

	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, skb->len);

	/* decrypt from the session key */
	token = call->conn->key->payload.data;
	memcpy(&iv, token->kad->session_key, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len);
	if (sg != _sg)
		kfree(sg);

	/* remove the decrypted packet length */
	if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
		goto datalen_error;
	if (!skb_pull(skb, sizeof(sechdr)))
		BUG();

	buf = ntohl(sechdr.data_size);
	data_size = buf & 0xffff;

	check = buf >> 16;
	check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
	check &= 0xffff;
	if (check != 0) {
		*_abort_code = RXKADSEALEDINCON;
		goto protocol_error;
	}

	/* shorten the packet to remove the padding */
	if (data_size > skb->len)
		goto datalen_error;
	else if (data_size < skb->len)
		skb->len = data_size;

	_leave(" = 0 [dlen=%x]", data_size);
	return 0;

datalen_error:
	*_abort_code = RXKADDATALEN;
protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;

nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * verify the security on a received packet
 *
 * Recomputes the header checksum the same way rxkad_secure_packet()
 * generated it and compares; then applies level-dependent payload
 * verification.
 */
static int rxkad_verify_packet(const struct rxrpc_call *call,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct blkcipher_desc desc;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct {
		__be32 x[2];
	} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
	__be32 x;
	__be16 cksum;
	u32 y;
	int ret;

	sp = rxrpc_skb(skb);

	_enter("{%d{%x}},{#%u}",
	       call->debug_id, key_serial(call->conn->key),
	       ntohl(sp->hdr.seq));

	if (!call->conn->cipher)
		return 0;

	if (sp->hdr.securityIndex != RXRPC_SECURITY_RXKAD) {
		*_abort_code = RXKADINCONSISTENCY;
		_leave(" = -EPROTO [not rxkad]");
		return -EPROTO;
	}

	/* continue encrypting from where we left off */
	memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* validate the security checksum */
	x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
	x |= sp->hdr.seq & cpu_to_be32(0x3fffffff);
	tmpbuf.x[0] = call->call_id;
	tmpbuf.x[1] = x;

	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	y = ntohl(tmpbuf.x[1]);
	y = (y >> 16) & 0xffff;
	if (y == 0)
		y = 1; /* zero checksums are not permitted */
	cksum = htons(y);

	if (sp->hdr.cksum != cksum) {
		*_abort_code = RXKADSEALEDINCON;
		_leave(" = -EPROTO [csum failed]");
		return -EPROTO;
	}

	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		ret = 0;
		break;
	case RXRPC_SECURITY_AUTH:
		ret = rxkad_verify_packet_auth(call, skb, _abort_code);
		break;
	case RXRPC_SECURITY_ENCRYPT:
		ret = rxkad_verify_packet_encrypt(call, skb, _abort_code);
		break;
	default:
		ret = -ENOANO;
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * issue a challenge
 *
 * Sends a CHALLENGE packet carrying a freshly generated nonce to the
 * connection's peer.  Returns 0, a key validation error, or -EAGAIN if
 * the sendmsg failed.
 */
static int rxkad_issue_challenge(struct rxrpc_connection *conn)
{
	struct rxkad_challenge challenge;
	struct rxrpc_header hdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("{%d,%x}", conn->debug_id, key_serial(conn->key));

	ret = key_validate(conn->key);
	if (ret < 0)
		return ret;

	get_random_bytes(&conn->security_nonce,
			 sizeof(conn->security_nonce));

	challenge.version = htonl(2);
	challenge.nonce = htonl(conn->security_nonce);
	challenge.min_level = htonl(0);
	challenge.__padding = 0;

	msg.msg_name = &conn->trans->peer->srx.transport.sin;
	msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	hdr.epoch = conn->epoch;
	hdr.cid = conn->cid;
	hdr.callNumber = 0;
	hdr.seq = 0;
	hdr.type = RXRPC_PACKET_TYPE_CHALLENGE;
	hdr.flags = conn->out_clientflag;
	hdr.userStatus = 0;
	hdr.securityIndex = conn->security_ix;
	hdr._rsvd = 0;
	hdr.serviceId = conn->service_id;

	iov[0].iov_base = &hdr;
	iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = &challenge;
	iov[1].iov_len = sizeof(challenge);

	len = iov[0].iov_len + iov[1].iov_len;

	hdr.serial = htonl(atomic_inc_return(&conn->serial));
	_proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));

	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}

/*
 * send a Kerberos security response
 *
 * Transmits a RESPONSE packet consisting of the RxRPC header, the
 * response body and the raw Kerberos ticket.
 */
static int rxkad_send_response(struct rxrpc_connection *conn,
			       struct rxrpc_header *hdr,
			       struct rxkad_response *resp,
			       const struct rxkad_key *s2)
{
	struct msghdr msg;
	struct kvec iov[3];
	size_t len;
	int ret;

	_enter("");

	msg.msg_name = &conn->trans->peer->srx.transport.sin;
	msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	hdr->epoch = conn->epoch;
	hdr->seq = 0;
	hdr->type = RXRPC_PACKET_TYPE_RESPONSE;
	hdr->flags = conn->out_clientflag;
	hdr->userStatus = 0;
	hdr->_rsvd = 0;

	iov[0].iov_base = hdr;
	iov[0].iov_len = sizeof(*hdr);
	iov[1].iov_base = resp;
	iov[1].iov_len = sizeof(*resp);
	iov[2].iov_base = (void *) s2->ticket;
	iov[2].iov_len = s2->ticket_len;

	len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;

	hdr->serial = htonl(atomic_inc_return(&conn->serial));
	_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));

	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}

/*
 * calculate the response checksum
 *
 * Multiplicative checksum over the whole response structure, stored in
 * the encrypted section.
 */
static void rxkad_calc_response_checksum(struct rxkad_response *response)
{
	u32 csum = 1000003;
	int loop;
	u8 *p = (u8 *) response;

	for (loop = sizeof(*response); loop > 0; loop--)
		csum = csum * 0x10204081 + *p++;

	response->encrypted.checksum = htonl(csum);
}

/*
 * load a scatterlist with a potentially split-page buffer
 */
static void rxkad_sg_set_buf2(struct scatterlist sg[2],
			      void *buf, size_t buflen)
{
	int nsg = 1;

	sg_init_table(sg, 2);

	sg_set_buf(&sg[0], buf, buflen);
	if (sg[0].offset + buflen > PAGE_SIZE) {
		/* the buffer was split over two pages */
		sg[0].length = PAGE_SIZE - sg[0].offset;
		sg_set_buf(&sg[1], buf + sg[0].length,
			   buflen - sg[0].length);
		nsg++;
	}

	sg_mark_end(&sg[nsg - 1]);

	ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
}

/*
 * encrypt the response packet
 */
static void rxkad_encrypt_response(struct rxrpc_connection *conn, struct rxkad_response *resp, const struct rxkad_key *s2) { struct blkcipher_desc desc; struct rxrpc_crypt iv; struct scatterlist sg[2]; /* continue encrypting from where we left off */ memcpy(&iv, s2->session_key, sizeof(iv)); desc.tfm = conn->cipher; desc.info = iv.x; desc.flags = 0; rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); } /* * respond to a challenge packet */ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, struct sk_buff *skb, u32 *_abort_code) { const struct rxrpc_key_token *token; struct rxkad_challenge challenge; struct rxkad_response resp __attribute__((aligned(8))); /* must be aligned for crypto */ struct rxrpc_skb_priv *sp; u32 version, nonce, min_level, abort_code; int ret; _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); if (!conn->key) { _leave(" = -EPROTO [no key]"); return -EPROTO; } ret = key_validate(conn->key); if (ret < 0) { *_abort_code = RXKADEXPIRED; return ret; } abort_code = RXKADPACKETSHORT; sp = rxrpc_skb(skb); if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0) goto protocol_error; version = ntohl(challenge.version); nonce = ntohl(challenge.nonce); min_level = ntohl(challenge.min_level); _proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }", ntohl(sp->hdr.serial), version, nonce, min_level); abort_code = RXKADINCONSISTENCY; if (version != RXKAD_VERSION) goto protocol_error; abort_code = RXKADLEVELFAIL; if (conn->security_level < min_level) goto protocol_error; token = conn->key->payload.data; /* build the response packet */ memset(&resp, 0, sizeof(resp)); resp.version = RXKAD_VERSION; resp.encrypted.epoch = conn->epoch; resp.encrypted.cid = conn->cid; resp.encrypted.securityIndex = htonl(conn->security_ix); resp.encrypted.call_id[0] = (conn->channels[0] ? conn->channels[0]->call_id : 0); resp.encrypted.call_id[1] = (conn->channels[1] ? 
conn->channels[1]->call_id : 0); resp.encrypted.call_id[2] = (conn->channels[2] ? conn->channels[2]->call_id : 0); resp.encrypted.call_id[3] = (conn->channels[3] ? conn->channels[3]->call_id : 0); resp.encrypted.inc_nonce = htonl(nonce + 1); resp.encrypted.level = htonl(conn->security_level); resp.kvno = htonl(token->kad->kvno); resp.ticket_len = htonl(token->kad->ticket_len); /* calculate the response checksum and then do the encryption */ rxkad_calc_response_checksum(&resp); rxkad_encrypt_response(conn, &resp, token->kad); return rxkad_send_response(conn, &sp->hdr, &resp, token->kad); protocol_error: *_abort_code = abort_code; _leave(" = -EPROTO [%d]", abort_code); return -EPROTO; } /* * decrypt the kerberos IV ticket in the response */ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn, void *ticket, size_t ticket_len, struct rxrpc_crypt *_session_key, time_t *_expiry, u32 *_abort_code) { struct blkcipher_desc desc; struct rxrpc_crypt iv, key; struct scatterlist sg[1]; struct in_addr addr; unsigned life; time_t issue, now; bool little_endian; int ret; u8 *p, *q, *name, *end; _enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key)); *_expiry = 0; ret = key_validate(conn->server_key); if (ret < 0) { switch (ret) { case -EKEYEXPIRED: *_abort_code = RXKADEXPIRED; goto error; default: *_abort_code = RXKADNOAUTH; goto error; } } ASSERT(conn->server_key->payload.data != NULL); ASSERTCMP((unsigned long) ticket & 7UL, ==, 0); memcpy(&iv, &conn->server_key->type_data, sizeof(iv)); desc.tfm = conn->server_key->payload.data; desc.info = iv.x; desc.flags = 0; sg_init_one(&sg[0], ticket, ticket_len); crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len); p = ticket; end = p + ticket_len; #define Z(size) \ ({ \ u8 *__str = p; \ q = memchr(p, 0, end - p); \ if (!q || q - p > (size)) \ goto bad_ticket; \ for (; p < q; p++) \ if (!isprint(*p)) \ goto bad_ticket; \ p++; \ __str; \ }) /* extract the ticket flags */ _debug("KIV FLAGS: %x", *p); little_endian = 
*p & 1; p++; /* extract the authentication name */ name = Z(ANAME_SZ); _debug("KIV ANAME: %s", name); /* extract the principal's instance */ name = Z(INST_SZ); _debug("KIV INST : %s", name); /* extract the principal's authentication domain */ name = Z(REALM_SZ); _debug("KIV REALM: %s", name); if (end - p < 4 + 8 + 4 + 2) goto bad_ticket; /* get the IPv4 address of the entity that requested the ticket */ memcpy(&addr, p, sizeof(addr)); p += 4; _debug("KIV ADDR : %pI4", &addr); /* get the session key from the ticket */ memcpy(&key, p, sizeof(key)); p += 8; _debug("KIV KEY : %08x %08x", ntohl(key.n[0]), ntohl(key.n[1])); memcpy(_session_key, &key, sizeof(key)); /* get the ticket's lifetime */ life = *p++ * 5 * 60; _debug("KIV LIFE : %u", life); /* get the issue time of the ticket */ if (little_endian) { __le32 stamp; memcpy(&stamp, p, 4); issue = le32_to_cpu(stamp); } else { __be32 stamp; memcpy(&stamp, p, 4); issue = be32_to_cpu(stamp); } p += 4; now = get_seconds(); _debug("KIV ISSUE: %lx [%lx]", issue, now); /* check the ticket is in date */ if (issue > now) { *_abort_code = RXKADNOAUTH; ret = -EKEYREJECTED; goto error; } if (issue < now - life) { *_abort_code = RXKADEXPIRED; ret = -EKEYEXPIRED; goto error; } *_expiry = issue + life; /* get the service name */ name = Z(SNAME_SZ); _debug("KIV SNAME: %s", name); /* get the service instance name */ name = Z(INST_SZ); _debug("KIV SINST: %s", name); ret = 0; error: _leave(" = %d", ret); return ret; bad_ticket: *_abort_code = RXKADBADTICKET; ret = -EBADMSG; goto error; } /* * decrypt the response packet */ static void rxkad_decrypt_response(struct rxrpc_connection *conn, struct rxkad_response *resp, const struct rxrpc_crypt *session_key) { struct blkcipher_desc desc; struct scatterlist sg[2]; struct rxrpc_crypt iv; _enter(",,%08x%08x", ntohl(session_key->n[0]), ntohl(session_key->n[1])); ASSERT(rxkad_ci != NULL); mutex_lock(&rxkad_ci_mutex); if (crypto_blkcipher_setkey(rxkad_ci, session_key->x, sizeof(*session_key)) < 0) 
BUG(); memcpy(&iv, session_key, sizeof(iv)); desc.tfm = rxkad_ci; desc.info = iv.x; desc.flags = 0; rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); mutex_unlock(&rxkad_ci_mutex); _leave(""); } /* * verify a response */ static int rxkad_verify_response(struct rxrpc_connection *conn, struct sk_buff *skb, u32 *_abort_code) { struct rxkad_response response __attribute__((aligned(8))); /* must be aligned for crypto */ struct rxrpc_skb_priv *sp; struct rxrpc_crypt session_key; time_t expiry; void *ticket; u32 abort_code, version, kvno, ticket_len, level; __be32 csum; int ret; _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key)); abort_code = RXKADPACKETSHORT; if (skb_copy_bits(skb, 0, &response, sizeof(response)) < 0) goto protocol_error; if (!pskb_pull(skb, sizeof(response))) BUG(); version = ntohl(response.version); ticket_len = ntohl(response.ticket_len); kvno = ntohl(response.kvno); sp = rxrpc_skb(skb); _proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }", ntohl(sp->hdr.serial), version, kvno, ticket_len); abort_code = RXKADINCONSISTENCY; if (version != RXKAD_VERSION) goto protocol_error; abort_code = RXKADTICKETLEN; if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) goto protocol_error; abort_code = RXKADUNKNOWNKEY; if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) goto protocol_error; /* extract the kerberos ticket and decrypt and decode it */ ticket = kmalloc(ticket_len, GFP_NOFS); if (!ticket) return -ENOMEM; abort_code = RXKADPACKETSHORT; if (skb_copy_bits(skb, 0, ticket, ticket_len) < 0) goto protocol_error_free; ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key, &expiry, &abort_code); if (ret < 0) { *_abort_code = abort_code; kfree(ticket); return ret; } /* use the session key from inside the ticket to decrypt the * response */ rxkad_decrypt_response(conn, &response, &session_key); abort_code = RXKADSEALEDINCON; if (response.encrypted.epoch != conn->epoch) goto 
protocol_error_free; if (response.encrypted.cid != conn->cid) goto protocol_error_free; if (ntohl(response.encrypted.securityIndex) != conn->security_ix) goto protocol_error_free; csum = response.encrypted.checksum; response.encrypted.checksum = 0; rxkad_calc_response_checksum(&response); if (response.encrypted.checksum != csum) goto protocol_error_free; if (ntohl(response.encrypted.call_id[0]) > INT_MAX || ntohl(response.encrypted.call_id[1]) > INT_MAX || ntohl(response.encrypted.call_id[2]) > INT_MAX || ntohl(response.encrypted.call_id[3]) > INT_MAX) goto protocol_error_free; abort_code = RXKADOUTOFSEQUENCE; if (response.encrypted.inc_nonce != htonl(conn->security_nonce + 1)) goto protocol_error_free; abort_code = RXKADLEVELFAIL; level = ntohl(response.encrypted.level); if (level > RXRPC_SECURITY_ENCRYPT) goto protocol_error_free; conn->security_level = level; /* create a key to hold the security data and expiration time - after * this the connection security can be handled in exactly the same way * as for a client connection */ ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno); if (ret < 0) { kfree(ticket); return ret; } kfree(ticket); _leave(" = 0"); return 0; protocol_error_free: kfree(ticket); protocol_error: *_abort_code = abort_code; _leave(" = -EPROTO [%d]", abort_code); return -EPROTO; } /* * clear the connection security */ static void rxkad_clear(struct rxrpc_connection *conn) { _enter(""); if (conn->cipher) crypto_free_blkcipher(conn->cipher); } /* * RxRPC Kerberos-based security */ static struct rxrpc_security rxkad = { .owner = THIS_MODULE, .name = "rxkad", .security_index = RXRPC_SECURITY_RXKAD, .init_connection_security = rxkad_init_connection_security, .prime_packet_security = rxkad_prime_packet_security, .secure_packet = rxkad_secure_packet, .verify_packet = rxkad_verify_packet, .issue_challenge = rxkad_issue_challenge, .respond_to_challenge = rxkad_respond_to_challenge, .verify_response = rxkad_verify_response, .clear = 
rxkad_clear, }; static __init int rxkad_init(void) { _enter(""); /* pin the cipher we need so that the crypto layer doesn't invoke * keventd to go get it */ rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(rxkad_ci)) return PTR_ERR(rxkad_ci); return rxrpc_register_security(&rxkad); } module_init(rxkad_init); static __exit void rxkad_exit(void) { _enter(""); rxrpc_unregister_security(&rxkad); crypto_free_blkcipher(rxkad_ci); } module_exit(rxkad_exit);
gpl-2.0
lookflying/linux-kernel
lib/iommu-helper.c
9346
1029
/*
 * IOMMU helper functions for the free area management
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>

/*
 * Return non-zero if an allocation of @nr entries starting at @index
 * (offset by @shift) would cross a @boundary_size-aligned boundary.
 * @boundary_size must be a power of two.
 */
int iommu_is_span_boundary(unsigned int index, unsigned int nr,
			   unsigned long shift, unsigned long boundary_size)
{
	unsigned long offset;

	BUG_ON(!is_power_of_2(boundary_size));

	/* position of the allocation within its boundary-sized segment */
	offset = (shift + index) & (boundary_size - 1);
	return offset + nr > boundary_size;
}

/*
 * Find and claim @nr free contiguous entries in @map, starting the search
 * at @start, honouring @align_mask and never spanning a boundary.
 * Returns the index of the allocated range, or -1 on failure.
 */
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
			       unsigned long start, unsigned int nr,
			       unsigned long shift, unsigned long boundary_size,
			       unsigned long align_mask)
{
	unsigned long index;

	/* We don't want the last of the limit */
	size -= 1;

	for (;;) {
		index = bitmap_find_next_zero_area(map, size, start, nr,
						   align_mask);
		if (index >= size)
			return -1;
		if (!iommu_is_span_boundary(index, nr, shift, boundary_size))
			break;
		/* we could do more effectively */
		start = index + 1;
	}

	bitmap_set(map, index, nr);
	return index;
}
EXPORT_SYMBOL(iommu_area_alloc);
gpl-2.0
fbocharov/au-linux-kernel-spring-2016
linux/drivers/bluetooth/btsdio.c
131
8254
/* * * Generic Bluetooth SDIO driver * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * Copyright (C) 2007 Marcel Holtmann <marcel@holtmann.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_func.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "0.1" static const struct sdio_device_id btsdio_table[] = { /* Generic Bluetooth Type-A SDIO device */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_A) }, /* Generic Bluetooth Type-B SDIO device */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) }, /* Generic Bluetooth AMP controller */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(sdio, btsdio_table); struct btsdio_data { struct hci_dev *hdev; struct sdio_func *func; struct work_struct work; struct sk_buff_head txq; }; #define REG_RDAT 0x00 /* Receiver Data */ #define REG_TDAT 0x00 /* Transmitter Data */ #define REG_PC_RRT 0x10 /* Read Packet Control */ #define REG_PC_WRT 0x11 /* Write Packet Control */ #define REG_RTC_STAT 0x12 /* Retry Control Status */ #define REG_RTC_SET 0x12 /* Retry Control Set */ 
#define REG_INTRD 0x13 /* Interrupt Indication */ #define REG_CL_INTRD 0x13 /* Interrupt Clear */ #define REG_EN_INTRD 0x14 /* Interrupt Enable */ #define REG_MD_STAT 0x20 /* Bluetooth Mode Status */ #define REG_MD_SET 0x20 /* Bluetooth Mode Set */ static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb) { int err; BT_DBG("%s", data->hdev->name); /* Prepend Type-A header */ skb_push(skb, 4); skb->data[0] = (skb->len & 0x0000ff); skb->data[1] = (skb->len & 0x00ff00) >> 8; skb->data[2] = (skb->len & 0xff0000) >> 16; skb->data[3] = bt_cb(skb)->pkt_type; err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len); if (err < 0) { skb_pull(skb, 4); sdio_writeb(data->func, 0x01, REG_PC_WRT, NULL); return err; } data->hdev->stat.byte_tx += skb->len; kfree_skb(skb); return 0; } static void btsdio_work(struct work_struct *work) { struct btsdio_data *data = container_of(work, struct btsdio_data, work); struct sk_buff *skb; int err; BT_DBG("%s", data->hdev->name); sdio_claim_host(data->func); while ((skb = skb_dequeue(&data->txq))) { err = btsdio_tx_packet(data, skb); if (err < 0) { data->hdev->stat.err_tx++; skb_queue_head(&data->txq, skb); break; } } sdio_release_host(data->func); } static int btsdio_rx_packet(struct btsdio_data *data) { u8 hdr[4] __attribute__ ((aligned(4))); struct sk_buff *skb; int err, len; BT_DBG("%s", data->hdev->name); err = sdio_readsb(data->func, hdr, REG_RDAT, 4); if (err < 0) return err; len = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16); if (len < 4 || len > 65543) return -EILSEQ; skb = bt_skb_alloc(len - 4, GFP_KERNEL); if (!skb) { /* Out of memory. Prepare a read retry and just * return with the expectation that the next time * we're called we'll have more memory. 
*/ return -ENOMEM; } skb_put(skb, len - 4); err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4); if (err < 0) { kfree_skb(skb); return err; } data->hdev->stat.byte_rx += len; bt_cb(skb)->pkt_type = hdr[3]; err = hci_recv_frame(data->hdev, skb); if (err < 0) return err; sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL); return 0; } static void btsdio_interrupt(struct sdio_func *func) { struct btsdio_data *data = sdio_get_drvdata(func); int intrd; BT_DBG("%s", data->hdev->name); intrd = sdio_readb(func, REG_INTRD, NULL); if (intrd & 0x01) { sdio_writeb(func, 0x01, REG_CL_INTRD, NULL); if (btsdio_rx_packet(data) < 0) { data->hdev->stat.err_rx++; sdio_writeb(data->func, 0x01, REG_PC_RRT, NULL); } } } static int btsdio_open(struct hci_dev *hdev) { struct btsdio_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s", hdev->name); sdio_claim_host(data->func); err = sdio_enable_func(data->func); if (err < 0) goto release; err = sdio_claim_irq(data->func, btsdio_interrupt); if (err < 0) { sdio_disable_func(data->func); goto release; } if (data->func->class == SDIO_CLASS_BT_B) sdio_writeb(data->func, 0x00, REG_MD_SET, NULL); sdio_writeb(data->func, 0x01, REG_EN_INTRD, NULL); release: sdio_release_host(data->func); return err; } static int btsdio_close(struct hci_dev *hdev) { struct btsdio_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); sdio_claim_host(data->func); sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL); sdio_release_irq(data->func); sdio_disable_func(data->func); sdio_release_host(data->func); return 0; } static int btsdio_flush(struct hci_dev *hdev) { struct btsdio_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); skb_queue_purge(&data->txq); return 0; } static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct btsdio_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; 
break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; default: return -EILSEQ; } skb_queue_tail(&data->txq, skb); schedule_work(&data->work); return 0; } static int btsdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { struct btsdio_data *data; struct hci_dev *hdev; struct sdio_func_tuple *tuple = func->tuples; int err; BT_DBG("func %p id %p class 0x%04x", func, id, func->class); while (tuple) { BT_DBG("code 0x%x size %d", tuple->code, tuple->size); tuple = tuple->next; } data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->func = func; INIT_WORK(&data->work, btsdio_work); skb_queue_head_init(&data->txq); hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; hdev->bus = HCI_SDIO; hci_set_drvdata(hdev, data); if (id->class == SDIO_CLASS_BT_AMP) hdev->dev_type = HCI_AMP; else hdev->dev_type = HCI_BREDR; data->hdev = hdev; SET_HCIDEV_DEV(hdev, &func->dev); hdev->open = btsdio_open; hdev->close = btsdio_close; hdev->flush = btsdio_flush; hdev->send = btsdio_send_frame; if (func->vendor == 0x0104 && func->device == 0x00c5) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); err = hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); return err; } sdio_set_drvdata(func, data); return 0; } static void btsdio_remove(struct sdio_func *func) { struct btsdio_data *data = sdio_get_drvdata(func); struct hci_dev *hdev; BT_DBG("func %p", func); if (!data) return; hdev = data->hdev; sdio_set_drvdata(func, NULL); hci_unregister_dev(hdev); hci_free_dev(hdev); } static struct sdio_driver btsdio_driver = { .name = "btsdio", .probe = btsdio_probe, .remove = btsdio_remove, .id_table = btsdio_table, }; static int __init btsdio_init(void) { BT_INFO("Generic Bluetooth SDIO driver ver %s", VERSION); return sdio_register_driver(&btsdio_driver); } static void __exit btsdio_exit(void) { sdio_unregister_driver(&btsdio_driver); } module_init(btsdio_init); module_exit(btsdio_exit); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 
MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
gpl-2.0
bigzz/s3c-kernel
arch/arm/mach-mx3/mach-mx35_3ds.c
131
3206
/* * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved. * * Author: Fabio Estevam <fabio.estevam@freescale.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * This machine is known as: * - i.MX35 3-Stack Development System * - i.MX35 Platform Development Kit (i.MX35 PDK) */ #include <linux/types.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/memory.h> #include <linux/gpio.h> #include <linux/fsl_devices.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/common.h> #include <mach/iomux-mx35.h> #include "devices-imx35.h" #include "devices.h" static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; static struct platform_device *devices[] __initdata = { &mxc_fec_device, }; static struct pad_desc mx35pdk_pads[] = { /* UART1 */ MX35_PAD_CTS1__UART1_CTS, MX35_PAD_RTS1__UART1_RTS, MX35_PAD_TXD1__UART1_TXD_MUX, MX35_PAD_RXD1__UART1_RXD_MUX, /* FEC */ MX35_PAD_FEC_TX_CLK__FEC_TX_CLK, MX35_PAD_FEC_RX_CLK__FEC_RX_CLK, MX35_PAD_FEC_RX_DV__FEC_RX_DV, MX35_PAD_FEC_COL__FEC_COL, MX35_PAD_FEC_RDATA0__FEC_RDATA_0, MX35_PAD_FEC_TDATA0__FEC_TDATA_0, MX35_PAD_FEC_TX_EN__FEC_TX_EN, MX35_PAD_FEC_MDC__FEC_MDC, MX35_PAD_FEC_MDIO__FEC_MDIO, MX35_PAD_FEC_TX_ERR__FEC_TX_ERR, MX35_PAD_FEC_RX_ERR__FEC_RX_ERR, MX35_PAD_FEC_CRS__FEC_CRS, MX35_PAD_FEC_RDATA1__FEC_RDATA_1, MX35_PAD_FEC_TDATA1__FEC_TDATA_1, MX35_PAD_FEC_RDATA2__FEC_RDATA_2, 
MX35_PAD_FEC_TDATA2__FEC_TDATA_2, MX35_PAD_FEC_RDATA3__FEC_RDATA_3, MX35_PAD_FEC_TDATA3__FEC_TDATA_3, /* USBOTG */ MX35_PAD_USBOTG_PWR__USB_TOP_USBOTG_PWR, MX35_PAD_USBOTG_OC__USB_TOP_USBOTG_OC, }; /* OTG config */ static struct fsl_usb2_platform_data usb_pdata = { .operating_mode = FSL_USB2_DR_DEVICE, .phy_mode = FSL_USB2_PHY_UTMI_WIDE, }; /* * Board specific initialization. */ static void __init mxc_board_init(void) { mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads)); platform_add_devices(devices, ARRAY_SIZE(devices)); imx35_add_imx_uart0(&uart_pdata); mxc_register_device(&mxc_otg_udc_device, &usb_pdata); } static void __init mx35pdk_timer_init(void) { mx35_clocks_init(); } struct sys_timer mx35pdk_timer = { .init = mx35pdk_timer_init, }; MACHINE_START(MX35_3DS, "Freescale MX35PDK") /* Maintainer: Freescale Semiconductor, Inc */ .phys_io = MX35_AIPS1_BASE_ADDR, .io_pg_offst = ((MX35_AIPS1_BASE_ADDR_VIRT) >> 18) & 0xfffc, .boot_params = MX3x_PHYS_OFFSET + 0x100, .map_io = mx35_map_io, .init_irq = mx35_init_irq, .init_machine = mxc_board_init, .timer = &mx35pdk_timer, MACHINE_END
gpl-2.0
songbaby/linux
net/vmw_vsock/vmci_transport.c
131
59734
/* * VMware vSockets Driver * * Copyright (C) 2007-2013 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/bitops.h> #include <linux/cred.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/net.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/smp.h> #include <linux/socket.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <net/sock.h> #include <net/af_vsock.h> #include "vmci_transport_notify.h" static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg); static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg); static void vmci_transport_peer_attach_cb(u32 sub_id, const struct vmci_event_data *ed, void *client_data); static void vmci_transport_peer_detach_cb(u32 sub_id, const struct vmci_event_data *ed, void *client_data); static void vmci_transport_recv_pkt_work(struct work_struct *work); static int vmci_transport_recv_listen(struct sock *sk, struct vmci_transport_packet *pkt); static int vmci_transport_recv_connecting_server( struct sock *sk, struct sock *pending, struct vmci_transport_packet *pkt); static int vmci_transport_recv_connecting_client( struct sock *sk, struct vmci_transport_packet *pkt); static int vmci_transport_recv_connecting_client_negotiate( struct sock *sk, struct vmci_transport_packet 
*pkt); static int vmci_transport_recv_connecting_client_invalid( struct sock *sk, struct vmci_transport_packet *pkt); static int vmci_transport_recv_connected(struct sock *sk, struct vmci_transport_packet *pkt); static bool vmci_transport_old_proto_override(bool *old_pkt_proto); static u16 vmci_transport_new_proto_supported_versions(void); static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto, bool old_pkt_proto); struct vmci_transport_recv_pkt_info { struct work_struct work; struct sock *sk; struct vmci_transport_packet pkt; }; static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID, VMCI_INVALID_ID }; static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID; static int PROTOCOL_OVERRIDE = -1; #define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN 128 #define VMCI_TRANSPORT_DEFAULT_QP_SIZE 262144 #define VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX 262144 /* The default peer timeout indicates how long we will wait for a peer response * to a control message. */ #define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ) #define SS_LISTEN 255 /* Helper function to convert from a VMCI error code to a VSock error code. */ static s32 vmci_transport_error_to_vsock_error(s32 vmci_error) { int err; switch (vmci_error) { case VMCI_ERROR_NO_MEM: err = ENOMEM; break; case VMCI_ERROR_DUPLICATE_ENTRY: case VMCI_ERROR_ALREADY_EXISTS: err = EADDRINUSE; break; case VMCI_ERROR_NO_ACCESS: err = EPERM; break; case VMCI_ERROR_NO_RESOURCES: err = ENOBUFS; break; case VMCI_ERROR_INVALID_RESOURCE: err = EHOSTUNREACH; break; case VMCI_ERROR_INVALID_ARGS: default: err = EINVAL; } return err > 0 ? 
-err : err; } static u32 vmci_transport_peer_rid(u32 peer_cid) { if (VMADDR_CID_HYPERVISOR == peer_cid) return VMCI_TRANSPORT_HYPERVISOR_PACKET_RID; return VMCI_TRANSPORT_PACKET_RID; } static inline void vmci_transport_packet_init(struct vmci_transport_packet *pkt, struct sockaddr_vm *src, struct sockaddr_vm *dst, u8 type, u64 size, u64 mode, struct vmci_transport_waiting_info *wait, u16 proto, struct vmci_handle handle) { /* We register the stream control handler as an any cid handle so we * must always send from a source address of VMADDR_CID_ANY */ pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY, VMCI_TRANSPORT_PACKET_RID); pkt->dg.dst = vmci_make_handle(dst->svm_cid, vmci_transport_peer_rid(dst->svm_cid)); pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg); pkt->version = VMCI_TRANSPORT_PACKET_VERSION; pkt->type = type; pkt->src_port = src->svm_port; pkt->dst_port = dst->svm_port; memset(&pkt->proto, 0, sizeof(pkt->proto)); memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2)); switch (pkt->type) { case VMCI_TRANSPORT_PACKET_TYPE_INVALID: pkt->u.size = 0; break; case VMCI_TRANSPORT_PACKET_TYPE_REQUEST: case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE: pkt->u.size = size; break; case VMCI_TRANSPORT_PACKET_TYPE_OFFER: case VMCI_TRANSPORT_PACKET_TYPE_ATTACH: pkt->u.handle = handle; break; case VMCI_TRANSPORT_PACKET_TYPE_WROTE: case VMCI_TRANSPORT_PACKET_TYPE_READ: case VMCI_TRANSPORT_PACKET_TYPE_RST: pkt->u.size = 0; break; case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN: pkt->u.mode = mode; break; case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ: case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE: memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait)); break; case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2: case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2: pkt->u.size = size; pkt->proto = proto; break; } } static inline void vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt, struct sockaddr_vm *local, struct sockaddr_vm *remote) { vsock_addr_init(local, pkt->dg.dst.context, 
pkt->dst_port); vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port); } static int __vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt, struct sockaddr_vm *src, struct sockaddr_vm *dst, enum vmci_transport_packet_type type, u64 size, u64 mode, struct vmci_transport_waiting_info *wait, u16 proto, struct vmci_handle handle, bool convert_error) { int err; vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait, proto, handle); err = vmci_datagram_send(&pkt->dg); if (convert_error && (err < 0)) return vmci_transport_error_to_vsock_error(err); return err; } static int vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt, enum vmci_transport_packet_type type, u64 size, u64 mode, struct vmci_transport_waiting_info *wait, struct vmci_handle handle) { struct vmci_transport_packet reply; struct sockaddr_vm src, dst; if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) { return 0; } else { vmci_transport_packet_get_addresses(pkt, &src, &dst); return __vmci_transport_send_control_pkt(&reply, &src, &dst, type, size, mode, wait, VSOCK_PROTO_INVALID, handle, true); } } static int vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src, struct sockaddr_vm *dst, enum vmci_transport_packet_type type, u64 size, u64 mode, struct vmci_transport_waiting_info *wait, struct vmci_handle handle) { /* Note that it is safe to use a single packet across all CPUs since * two tasklets of the same type are guaranteed to not ever run * simultaneously. If that ever changes, or VMCI stops using tasklets, * we can use per-cpu packets. 
 */
	static struct vmci_transport_packet pkt;

	return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
						 size, mode, wait,
						 VSOCK_PROTO_INVALID, handle,
						 false);
}

/* Allocating variant of the control-packet sender for process context:
 * requires both endpoints of the socket to be bound, kmallocs the packet
 * (GFP_KERNEL) and frees it once sent.
 */
static int
vmci_transport_send_control_pkt(struct sock *sk,
				enum vmci_transport_packet_type type,
				u64 size,
				u64 mode,
				struct vmci_transport_waiting_info *wait,
				u16 proto,
				struct vmci_handle handle)
{
	struct vmci_transport_packet *pkt;
	struct vsock_sock *vsk;
	int err;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (!vsock_addr_bound(&vsk->remote_addr))
		return -EINVAL;

	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
						&vsk->remote_addr, type, size,
						mode, wait, proto, handle,
						true);
	kfree(pkt);

	return err;
}

/* Bottom-half RST reply.  Never answers a RST with another RST, so two
 * endpoints cannot ping-pong resets forever.
 */
static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
					struct sockaddr_vm *src,
					struct vmci_transport_packet *pkt)
{
	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

/* Process-context RST reply; same no-RST-for-RST rule as the _bh variant. */
static int vmci_transport_send_reset(struct sock *sk,
				     struct vmci_transport_packet *pkt)
{
	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;
	return vmci_transport_send_control_pkt(sk,
					VMCI_TRANSPORT_PACKET_TYPE_RST,
					0, 0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
					  u16 version)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_qp_offer(struct sock *sk,
					struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
					0, NULL,
					VSOCK_PROTO_INVALID,
					handle);
}

static int vmci_transport_send_attach(struct sock *sk,
				      struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
					0, 0, NULL, VSOCK_PROTO_INVALID,
					handle);
}

static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
{
	return vmci_transport_reply_control_pkt_fast(
						pkt,
						VMCI_TRANSPORT_PACKET_TYPE_RST,
						0, 0, NULL,
						VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
					  struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_INVALID,
					0, 0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
				 struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
				struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_write(struct sock *sk,
				      struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_read(struct sock *sk,
				     struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	return vmci_transport_send_control_pkt(
					&vsk->sk,
					VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
					0, mode, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(sk,
					VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
					     u16 version)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

/* Look up a pending (not yet accepted) child of @listener that matches the
 * source/destination of @pkt.  Returns the child with a reference held, or
 * NULL; the caller releases via vmci_transport_release_pending().
 */
static struct sock *vmci_transport_get_pending(
					struct sock *listener,
					struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;
	struct sock *pending;
	struct sockaddr_vm src;

	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);

	vlistener = vsock_sk(listener);

	list_for_each_entry(vpending, &vlistener->pending_links,
			    pending_links) {
		if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
		    pkt->dst_port == vpending->local_addr.svm_port) {
			pending = sk_vsock(vpending);
			sock_hold(pending);
			goto found;
		}
	}

	pending = NULL;
found:
	return pending;
}

/* Drop the reference taken by vmci_transport_get_pending(). */
static void vmci_transport_release_pending(struct sock *pending)
{
	sock_put(pending);
}

/* We allow two kinds of sockets to communicate with a restricted VM: 1)
 * trusted sockets 2) sockets from applications running as the same user as the
 * VM (this is only true for the host side and only when using hosted products)
 */

static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
{
	return vsock->trusted ||
	       vmci_is_context_owner(peer_cid, vsock->owner->uid);
}

/* We allow sending datagrams to and receiving datagrams from a restricted VM
 * only if it is trusted as described in vmci_transport_is_trusted.
 *
 * The verdict is cached per peer CID on the socket so the privilege-flag
 * query is only repeated when the peer changes.
 */
static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
{
	if (VMADDR_CID_HYPERVISOR == peer_cid)
		return true;

	if (vsock->cached_peer != peer_cid) {
		vsock->cached_peer = peer_cid;
		if (!vmci_transport_is_trusted(vsock, peer_cid) &&
		    (vmci_context_get_priv_flags(peer_cid) &
		     VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
			vsock->cached_peer_allow_dgram = false;
		} else {
			vsock->cached_peer_allow_dgram = true;
		}
	}

	return vsock->cached_peer_allow_dgram;
}

/* Allocate/attach a VMCI queue pair, first with trusted privilege when
 * requested (host side), falling back to unprivileged allocation if that
 * is denied.  Returns a vsock error code on failure.
 */
static int vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
					   struct vmci_handle *handle,
					   u64 produce_size,
					   u64 consume_size,
					   u32 peer, u32 flags, bool trusted)
{
	int err = 0;

	if (trusted) {
		/* Try to allocate our queue pair as trusted. This will only
		 * work if vsock is running in the host.
		 */

		err = vmci_qpair_alloc(qpair, handle, produce_size,
				       consume_size,
				       peer, flags,
				       VMCI_PRIVILEGE_FLAG_TRUSTED);
		if (err != VMCI_ERROR_NO_ACCESS)
			goto out;

	}

	err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
			       peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
out:
	if (err < 0) {
		pr_err("Could not attach to queue pair with %d\n",
		       err);
		err = vmci_transport_error_to_vsock_error(err);
	}

	return err;
}

/* Create a VMCI datagram handle, preferring the trusted (host-side)
 * variant and falling back to the unprivileged one on access denial.
 */
static int
vmci_transport_datagram_create_hnd(u32 resource_id,
				   u32 flags,
				   vmci_datagram_recv_cb recv_cb,
				   void *client_data,
				   struct vmci_handle *out_handle)
{
	int err = 0;

	/* Try to allocate our datagram handler as trusted. This will only work
	 * if vsock is running in the host.
	 */

	err = vmci_datagram_create_handle_priv(resource_id, flags,
					       VMCI_PRIVILEGE_FLAG_TRUSTED,
					       recv_cb,
					       client_data, out_handle);

	if (err == VMCI_ERROR_NO_ACCESS)
		err = vmci_datagram_create_handle(resource_id, flags,
						  recv_cb, client_data,
						  out_handle);

	return err;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context and if it ever needs to
 * sleep it should defer that work to a work queue.
 */
static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	size_t size;
	struct sk_buff *skb;
	struct vsock_sock *vsk;

	sk = (struct sock *)data;

	/* This handler is privileged when this module is running on the host.
	 * We will get datagrams from all endpoints (even VMs that are in a
	 * restricted context). If we get one from a restricted context then
	 * the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, dg->src.context))
		return VMCI_ERROR_NO_ACCESS;

	size = VMCI_DG_SIZE(dg);

	/* Attach the packet to the socket's receive queue as an sk_buff. */
	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return VMCI_ERROR_NO_MEM;

	/* sk_receive_skb() will do a sock_put(), so hold here. */
	sock_hold(sk);
	skb_put(skb, size);
	memcpy(skb->data, dg, size);
	sk_receive_skb(sk, skb, 0);

	return VMCI_SUCCESS;
}

/* Reject stream traffic from VMCI contexts that can never host a vsock
 * endpoint (currently only VMADDR_CID_RESERVED).
 */
static bool vmci_transport_stream_allow(u32 cid, u32 port)
{
	static const u32 non_socket_contexts[] = {
		VMADDR_CID_RESERVED,
	};
	int i;

	BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));

	for (i = 0; i < ARRAY_SIZE(non_socket_contexts); i++) {
		if (cid == non_socket_contexts[i])
			return false;
	}

	return true;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context but it defers most of
 * its work to the packet handling work queue.
 */
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	struct sockaddr_vm dst;
	struct sockaddr_vm src;
	struct vmci_transport_packet *pkt;
	struct vsock_sock *vsk;
	bool bh_process_pkt;
	int err;

	sk = NULL;
	err = VMCI_SUCCESS;
	bh_process_pkt = false;

	/* Ignore incoming packets from contexts without sockets, or resources
	 * that aren't vsock implementations.
	 */
	if (!vmci_transport_stream_allow(dg->src.context, -1) ||
	    vmci_transport_peer_rid(dg->src.context) != dg->src.resource)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
		/* Drop datagrams that do not contain full VSock packets. */
		return VMCI_ERROR_INVALID_ARGS;

	pkt = (struct vmci_transport_packet *)dg;

	/* Find the socket that should handle this packet.  First we look for a
	 * connected socket and if there is none we look for a socket bound to
	 * the destintation address.
	 */
	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
	vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);

	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			/* We could not find a socket for this specified
			 * address.  If this packet is a RST, we just drop it.
			 * If it is another packet, we send a RST.  Note that
			 * we do not send a RST reply to RSTs so that we do not
			 * continually send RSTs between two endpoints.
			 *
			 * Note that since this is a reply, dst is src and src
			 * is dst.
			 */
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NOT_FOUND;
			goto out;
		}
	}

	/* If the received packet type is beyond all types known to this
	 * implementation, reply with an invalid message.  Hopefully this will
	 * help when implementing backwards compatibility in the future.
	 */
	if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
		vmci_transport_send_invalid_bh(&dst, &src);
		err = VMCI_ERROR_INVALID_ARGS;
		goto out;
	}

	/* This handler is privileged when this module is running on the host.
	 * We will get datagram connect requests from all endpoints (even VMs
	 * that are in a restricted context). If we get one from a restricted
	 * context then the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
		err = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	/* We do most everything in a work queue, but let's fast path the
	 * notification of reads and writes to help data transfer performance.
	 * We can only do this if there is no process context code executing
	 * for this socket since that may change the state.
	 */
	bh_lock_sock(sk);

	if (!sock_owned_by_user(sk)) {
		/* The local context ID may be out of date, update it. */
		vsk->local_addr.svm_cid = dst.svm_cid;

		if (sk->sk_state == SS_CONNECTED)
			vmci_trans(vsk)->notify_ops->handle_notify_pkt(
					sk, pkt, true, &dst, &src,
					&bh_process_pkt);
	}

	bh_unlock_sock(sk);

	if (!bh_process_pkt) {
		struct vmci_transport_recv_pkt_info *recv_pkt_info;

		recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
		if (!recv_pkt_info) {
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NO_MEM;
			goto out;
		}

		recv_pkt_info->sk = sk;
		memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
		INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);

		schedule_work(&recv_pkt_info->work);
		/* Clear sk so that the reference count incremented by one of
		 * the Find functions above is not decremented below.  We need
		 * that reference count for the packet handler we've scheduled
		 * to run.
		 */
		sk = NULL;
	}

out:
	if (sk)
		sock_put(sk);

	return err;
}

static void vmci_transport_peer_attach_cb(u32 sub_id,
					  const struct vmci_event_data *e_data,
					  void *client_data)
{
	struct sock *sk = client_data;
	const struct vmci_event_payload_qp *e_payload;
	struct vsock_sock *vsk;

	e_payload = vmci_event_data_const_payload(e_data);

	vsk = vsock_sk(sk);

	/* We don't ask for delayed CBs when we subscribe to this event (we
	 * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
	 * guarantees in that case about what context we might be running in,
	 * so it could be BH or process, blockable or non-blockable.  So we
	 * need to account for all possible contexts here.
	 */
	local_bh_disable();
	bh_lock_sock(sk);

	/* XXX This is lame, we should provide a way to lookup sockets by
	 * qp_handle.
	 */
	if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
				 e_payload->handle)) {
		/* XXX This doesn't do anything, but in the future we may want
		 * to set a flag here to verify the attach really did occur and
		 * we weren't just sent a datagram claiming it was.
		 */
		goto out;
	}

out:
	bh_unlock_sock(sk);
	local_bh_enable();
}

/* Tear down socket state after the peer detached from our queue pair:
 * mark shutdown, and transition to SS_UNCONNECTED (flagging ECONNRESET if
 * the handshake had not completed) unless unread data remains.
 */
static void vmci_transport_handle_detach(struct sock *sk)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);
	if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
		sock_set_flag(sk, SOCK_DONE);

		/* On a detach the peer will not be sending or receiving
		 * anymore.
		 */
		vsk->peer_shutdown = SHUTDOWN_MASK;

		/* We should not be sending anymore since the peer won't be
		 * there to receive, but we can still receive if there is data
		 * left in our consume queue.
		 */
		if (vsock_stream_has_data(vsk) <= 0) {
			if (sk->sk_state == SS_CONNECTING) {
				/* The peer may detach from a queue pair while
				 * we are still in the connecting state, i.e.,
				 * if the peer VM is killed after attaching to
				 * a queue pair, but before we complete the
				 * handshake. In that case, we treat the detach
				 * event like a reset.
				 */

				sk->sk_state = SS_UNCONNECTED;
				sk->sk_err = ECONNRESET;
				sk->sk_error_report(sk);
				return;
			}
			sk->sk_state = SS_UNCONNECTED;
		}
		sk->sk_state_change(sk);
	}
}

static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *e_data,
					  void *client_data)
{
	struct sock *sk = client_data;
	const struct vmci_event_payload_qp *e_payload;
	struct vsock_sock *vsk;

	e_payload = vmci_event_data_const_payload(e_data);
	vsk = vsock_sk(sk);
	if (vmci_handle_is_invalid(e_payload->handle))
		return;

	/* Same rules for locking as for peer_attach_cb(). */
	local_bh_disable();
	bh_lock_sock(sk);

	/* XXX This is lame, we should provide a way to lookup sockets by
	 * qp_handle.
	 */
	if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
				 e_payload->handle))
		vmci_transport_handle_detach(sk);

	bh_unlock_sock(sk);
	local_bh_enable();
}

/* On VMCI resume, treat every connected socket as if its peer detached. */
static void vmci_transport_qp_resumed_cb(u32 sub_id,
					 const struct vmci_event_data *e_data,
					 void *client_data)
{
	vsock_for_each_connected_socket(vmci_transport_handle_detach);
}

/* Work-queue handler: process one queued control packet in process context,
 * dispatching on the socket state, then drop the reference taken by the
 * stream callback.
 */
static void vmci_transport_recv_pkt_work(struct work_struct *work)
{
	struct vmci_transport_recv_pkt_info *recv_pkt_info;
	struct vmci_transport_packet *pkt;
	struct sock *sk;

	recv_pkt_info =
		container_of(work, struct vmci_transport_recv_pkt_info, work);
	sk = recv_pkt_info->sk;
	pkt = &recv_pkt_info->pkt;

	lock_sock(sk);

	/* The local context ID may be out of date. */
	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;

	switch (sk->sk_state) {
	case SS_LISTEN:
		vmci_transport_recv_listen(sk, pkt);
		break;
	case SS_CONNECTING:
		/* Processing of pending connections for servers goes through
		 * the listening socket, so see vmci_transport_recv_listen()
		 * for that path.
		 */
		vmci_transport_recv_connecting_client(sk, pkt);
		break;
	case SS_CONNECTED:
		vmci_transport_recv_connected(sk, pkt);
		break;
	default:
		/* Because this function does not run in the same context as
		 * vmci_transport_recv_stream_cb it is possible that the
		 * socket has closed. We need to let the other side know or it
		 * could be sitting in a connect and hang forever. Send a
		 * reset to prevent that.
		 */
		vmci_transport_send_reset(sk, pkt);
		break;
	}

	release_sock(sk);
	kfree(recv_pkt_info);
	/* Release reference obtained in the stream callback when we fetched
	 * this socket out of the bound or connected list.
	 */
	sock_put(sk);
}

static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct sock *pending;
	struct vsock_sock *vpending;
	int err;
	u64 qp_size;
	bool old_request = false;
	bool old_pkt_proto = false;

	err = 0;

	/* Because we are in the listen state, we could be receiving a packet
	 * for ourself or any previous connection requests that we received.
	 * If it's the latter, we try to find a socket in our list of pending
	 * connections and, if we do, call the appropriate handler for the
	 * state that that socket is in.  Otherwise we try to service the
	 * connection request.
	 */
	pending = vmci_transport_get_pending(sk, pkt);
	if (pending) {
		lock_sock(pending);

		/* The local context ID may be out of date. */
		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;

		switch (pending->sk_state) {
		case SS_CONNECTING:
			err = vmci_transport_recv_connecting_server(sk,
								    pending,
								    pkt);
			break;
		default:
			vmci_transport_send_reset(pending, pkt);
			err = -EINVAL;
		}

		if (err < 0)
			vsock_remove_pending(sk, pending);

		release_sock(pending);
		vmci_transport_release_pending(pending);

		return err;
	}

	/* The listen state only accepts connection requests.  Reply with a
	 * reset unless we received a reset.
	 */

	if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
	      pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	/* A zero proposed queue-pair size is invalid. */
	if (pkt->u.size == 0) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	/* If this socket can't accommodate this connection request, we send a
	 * reset.  Otherwise we create and initialize a child socket and reply
	 * with a connection negotiation.
	 */

	if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
		vmci_transport_reply_reset(pkt);
		return -ECONNREFUSED;
	}

	pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
				 sk->sk_type);
	if (!pending) {
		vmci_transport_send_reset(sk, pkt);
		return -ENOMEM;
	}

	vpending = vsock_sk(pending);

	vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
			pkt->dst_port);
	vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
			pkt->src_port);

	/* If the proposed size fits within our min/max, accept it. Otherwise
	 * propose our own size.
	 */
	if (pkt->u.size >= vmci_trans(vpending)->queue_pair_min_size &&
	    pkt->u.size <= vmci_trans(vpending)->queue_pair_max_size) {
		qp_size = pkt->u.size;
	} else {
		qp_size = vmci_trans(vpending)->queue_pair_size;
	}

	/* Figure out if we are using old or new requests based on the
	 * overrides pkt types sent by our peer.
	 */
	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_request = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
			old_request = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
			old_request = false;

	}

	if (old_request) {
		/* Handle a REQUEST (or override) */
		u16 version = VSOCK_PROTO_INVALID;
		if (vmci_transport_proto_to_notify_struct(
			pending, &version, true))
			err = vmci_transport_send_negotiate(pending, qp_size);
		else
			err = -EINVAL;

	} else {
		/* Handle a REQUEST2 (or override) */
		int proto_int = pkt->proto;
		int pos;
		u16 active_proto_version = 0;

		/* The list of possible protocols is the intersection of all
		 * protocols the client supports ... plus all the protocols we
		 * support.
		 */
		proto_int &= vmci_transport_new_proto_supported_versions();

		/* We choose the highest possible protocol version and use that
		 * one.
		 */
		pos = fls(proto_int);
		if (pos) {
			active_proto_version = (1 << (pos - 1));
			if (vmci_transport_proto_to_notify_struct(
				pending, &active_proto_version, false))
				err = vmci_transport_send_negotiate2(pending,
							qp_size,
							active_proto_version);
			else
				err = -EINVAL;

		} else {
			err = -EINVAL;
		}
	}

	if (err < 0) {
		vmci_transport_send_reset(sk, pkt);
		sock_put(pending);
		err = vmci_transport_error_to_vsock_error(err);
		goto out;
	}

	vsock_add_pending(sk, pending);
	sk->sk_ack_backlog++;

	pending->sk_state = SS_CONNECTING;
	vmci_trans(vpending)->produce_size =
		vmci_trans(vpending)->consume_size = qp_size;
	vmci_trans(vpending)->queue_pair_size = qp_size;

	vmci_trans(vpending)->notify_ops->process_request(pending);

	/* We might never receive another message for this socket and it's not
	 * connected to any process, so we have to ensure it gets cleaned up
	 * ourself.  Our delayed work function will take care of that.  Note
	 * that we do not ever cancel this function since we have few
	 * guarantees about its state when calling cancel_delayed_work().
	 * Instead we hold a reference on the socket for that function and make
	 * it capable of handling cases where it needs to do nothing but
	 * release that reference.
	 */
	vpending->listener = sk;
	sock_hold(sk);
	sock_hold(pending);
	INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
	schedule_delayed_work(&vpending->dwork, HZ);

out:
	return err;
}

/* Server-side continuation of the handshake: handle the client's OFFER on a
 * pending child socket, attach to the offered queue pair and send ATTACH.
 */
static int
vmci_transport_recv_connecting_server(struct sock *listener,
				      struct sock *pending,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vpending;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	bool is_local;
	u32 flags;
	u32 detach_sub_id;
	int err;
	int skerr;

	vpending = vsock_sk(pending);
	detach_sub_id = VMCI_INVALID_ID;

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
		if (vmci_handle_is_invalid(pkt->u.handle)) {
			vmci_transport_send_reset(pending, pkt);
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}
		break;
	default:
		/* Close and cleanup the connection.
		 */
		vmci_transport_send_reset(pending, pkt);

		skerr = EPROTO;
		err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
		goto destroy;
	}

	/* In order to complete the connection we need to attach to the offered
	 * queue pair and send an attach notification. We also subscribe to the
	 * detach event so we know when our peer goes away, and we do that
	 * before attaching so we don't miss an event.  If all this succeeds,
	 * we update our state and wakeup anything waiting in accept() for a
	 * connection.
	 */

	/* We don't care about attach since we ensure the other side has
	 * attached by specifying the ATTACH_ONLY flag below.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   pending, &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->detach_sub_id = detach_sub_id;

	/* Now attach to the queue pair the client created. */
	handle = pkt->u.handle;

	/* vpending->local_addr always has a context id so we do not need to
	 * worry about VMADDR_CID_ANY in this case.
	 */
	is_local =
	    vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
	flags = VMCI_QPFLAG_ATTACH_ONLY;
	flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(
					&qpair,
					&handle,
					vmci_trans(vpending)->produce_size,
					vmci_trans(vpending)->consume_size,
					pkt->dg.src.context,
					flags,
					vmci_transport_is_trusted(
						vpending,
						vpending->remote_addr.svm_cid));
	if (err < 0) {
		vmci_transport_send_reset(pending, pkt);
		skerr = -err;
		goto destroy;
	}

	vmci_trans(vpending)->qp_handle = handle;
	vmci_trans(vpending)->qpair = qpair;

	/* When we send the attach message, we must be ready to handle incoming
	 * control messages on the newly connected socket. So we move the
	 * pending socket to the connected state before sending the attach
	 * message.
	 * Otherwise, an incoming packet triggered by the attach being
	 * received by the peer may be processed concurrently with what happens
	 * below after sending the attach message, and that incoming packet
	 * will find the listening socket instead of the (currently) pending
	 * socket. Note that enqueueing the socket increments the reference
	 * count, so even if a reset comes before the connection is accepted,
	 * the socket will be valid until it is removed from the queue.
	 *
	 * If we fail sending the attach below, we remove the socket from the
	 * connected list and move the socket to SS_UNCONNECTED before
	 * releasing the lock, so a pending slow path processing of an incoming
	 * packet will not see the socket in the connected state in that case.
	 */
	pending->sk_state = SS_CONNECTED;

	vsock_insert_connected(vpending);

	/* Notify our peer of our attach. */
	err = vmci_transport_send_attach(pending, handle);
	if (err < 0) {
		vsock_remove_connected(vpending);
		pr_err("Could not send attach\n");
		vmci_transport_send_reset(pending, pkt);
		err = vmci_transport_error_to_vsock_error(err);
		skerr = -err;
		goto destroy;
	}

	/* We have a connection.  Move the now connected socket from the
	 * listener's pending list to the accept queue so callers of accept()
	 * can find it.
	 */
	vsock_remove_pending(listener, pending);
	vsock_enqueue_accept(listener, pending);

	/* Callers of accept() will be be waiting on the listening socket, not
	 * the pending socket.
	 */
	listener->sk_state_change(listener);

	return 0;

destroy:
	pending->sk_err = skerr;
	pending->sk_state = SS_UNCONNECTED;
	/* As long as we drop our reference, all necessary cleanup will handle
	 * when the cleanup function drops its reference and our destruct
	 * implementation is called.  Note that since the listen handler will
	 * remove pending from the pending list upon our failure, the cleanup
	 * function won't drop the additional reference, which is why we do it
	 * here.
	 */
	sock_put(pending);

	return err;
}

/* Client-side continuation of the handshake: dispatch on the control packet
 * type while this socket is in SS_CONNECTING.  Any protocol violation resets
 * the connection and reports the error via sk_err.
 */
static int
vmci_transport_recv_connecting_client(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	int err;
	int skerr;

	vsk = vsock_sk(sk);

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		if (vmci_handle_is_invalid(pkt->u.handle) ||
		    !vmci_handle_is_equal(pkt->u.handle,
					  vmci_trans(vsk)->qp_handle)) {
			skerr = EPROTO;
			err = -EINVAL;
			goto destroy;
		}

		/* Signify the socket is connected and wakeup the waiter in
		 * connect(). Also place the socket in the connected table for
		 * accounting (it can already be found since it's in the bound
		 * table).
		 */
		sk->sk_state = SS_CONNECTED;
		sk->sk_socket->state = SS_CONNECTED;
		vsock_insert_connected(vsk);
		sk->sk_state_change(sk);

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		/* A NEGOTIATE must come from the expected peer and arrive
		 * before any queue-pair or event-subscription state exists.
		 */
		if (pkt->u.size == 0
		    || pkt->dg.src.context != vsk->remote_addr.svm_cid
		    || pkt->src_port != vsk->remote_addr.svm_port
		    || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
		    || vmci_trans(vsk)->qpair
		    || vmci_trans(vsk)->produce_size != 0
		    || vmci_trans(vsk)->consume_size != 0
		    || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
		    || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
			skerr = EPROTO;
			err = -EINVAL;

			goto destroy;
		}

		err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
		if (err) {
			skerr = -err;
			goto destroy;
		}

		break;
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		/* Older versions of the linux code (WS 6.5 / ESX 4.0) used to
		 * continue processing here after they sent an INVALID packet.
		 * This meant that we got a RST after the INVALID. We ignore a
		 * RST after an INVALID. The common code doesn't send the RST
		 * ... so we can hang if an old version of the common code
		 * fails between getting a REQUEST and sending an OFFER back.
		 * Not much we can do about it... except hope that it doesn't
		 * happen.
		 */
		if (vsk->ignore_connecting_rst) {
			vsk->ignore_connecting_rst = false;
		} else {
			skerr = ECONNRESET;
			err = 0;
			goto destroy;
		}

		break;
	default:
		/* Close and cleanup the connection. */
		skerr = EPROTO;
		err = -EINVAL;
		goto destroy;
	}

	return 0;

destroy:
	vmci_transport_send_reset(sk, pkt);

	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = skerr;
	sk->sk_error_report(sk);
	return err;
}

/* Handle NEGOTIATE/NEGOTIATE2: pick the notify protocol version, subscribe
 * to attach/detach events, create the queue pair and send the OFFER.  On any
 * failure all partially-acquired resources are unwound at "destroy".
 */
static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt)
{
	int err;
	struct vsock_sock *vsk;
	struct vmci_handle handle;
	struct vmci_qp *qpair;
	u32 attach_sub_id;
	u32 detach_sub_id;
	bool is_local;
	u32 flags;
	bool old_proto = true;
	bool old_pkt_proto;
	u16 version;

	vsk = vsock_sk(sk);
	handle = VMCI_INVALID_HANDLE;
	attach_sub_id = VMCI_INVALID_ID;
	detach_sub_id = VMCI_INVALID_ID;

	/* If we have gotten here then we should be past the point where old
	 * linux vsock could have sent the bogus rst.
	 */
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;

	/* Verify that we're OK with the proposed queue pair size */
	if (pkt->u.size < vmci_trans(vsk)->queue_pair_min_size ||
	    pkt->u.size > vmci_trans(vsk)->queue_pair_max_size) {
		err = -EINVAL;
		goto destroy;
	}

	/* At this point we know the CID the peer is using to talk to us. */
	if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
		vsk->local_addr.svm_cid = pkt->dg.dst.context;

	/* Setup the notify ops to be the highest supported version that both
	 * the server and the client support.
	 */
	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
		old_proto = old_pkt_proto;
	} else {
		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
			old_proto = true;
		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
			old_proto = false;

	}

	if (old_proto)
		version = VSOCK_PROTO_INVALID;
	else
		version = pkt->proto;

	if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
		err = -EINVAL;
		goto destroy;
	}

	/* Subscribe to attach and detach events first.
	 *
	 * XXX We attach once for each queue pair created for now so it is easy
	 * to find the socket (it's provided), but later we should only
	 * subscribe once and add a way to lookup sockets by queue pair handle.
	 */
	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
				   vmci_transport_peer_attach_cb,
				   sk, &attach_sub_id);
	if (err < VMCI_SUCCESS) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
				   vmci_transport_peer_detach_cb,
				   sk, &detach_sub_id);
	if (err < VMCI_SUCCESS) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	/* Make VMCI select the handle for us. */
	handle = VMCI_INVALID_HANDLE;
	is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
	flags = is_local ? VMCI_QPFLAG_LOCAL : 0;

	err = vmci_transport_queue_pair_alloc(&qpair,
					      &handle,
					      pkt->u.size,
					      pkt->u.size,
					      vsk->remote_addr.svm_cid,
					      flags,
					      vmci_transport_is_trusted(
						  vsk,
						  vsk->
						  remote_addr.svm_cid));
	if (err < 0)
		goto destroy;

	err = vmci_transport_send_qp_offer(sk, handle);
	if (err < 0) {
		err = vmci_transport_error_to_vsock_error(err);
		goto destroy;
	}

	vmci_trans(vsk)->qp_handle = handle;
	vmci_trans(vsk)->qpair = qpair;

	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
		pkt->u.size;

	vmci_trans(vsk)->attach_sub_id = attach_sub_id;
	vmci_trans(vsk)->detach_sub_id = detach_sub_id;

	vmci_trans(vsk)->notify_ops->process_negotiate(sk);

	return 0;

destroy:
	if (attach_sub_id != VMCI_INVALID_ID)
		vmci_event_unsubscribe(attach_sub_id);

	if (detach_sub_id != VMCI_INVALID_ID)
		vmci_event_unsubscribe(detach_sub_id);

	if (!vmci_handle_is_invalid(handle))
		vmci_qpair_detach(&qpair);

	return err;
}

/* Handle an INVALID reply while connecting: if our REQUEST was the cause,
 * retry with the old-style request and ignore the RST the old peer sends.
 */
static int
vmci_transport_recv_connecting_client_invalid(struct sock *sk,
					      struct vmci_transport_packet *pkt)
{
	int err = 0;
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsk->sent_request) {
		vsk->sent_request = false;
		vsk->ignore_connecting_rst = true;

		err = vmci_transport_send_conn_request(
			sk, vmci_trans(vsk)->queue_pair_size);
		if (err < 0)
			err
				= vmci_transport_error_to_vsock_error(err);
		else
			err = 0;
	}

	return err;
}

/* Handle control packets arriving on an established (SS_CONNECTED) socket:
 * shutdown/reset notifications are applied directly, everything else is
 * delegated to the negotiated notify_ops handler.
 */
static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vsk;
	bool pkt_processed = false;

	/* In cases where we are closing the connection, it's sufficient to
	 * mark the state change (and maybe error) and wake up any waiting
	 * threads. Since this is a connected socket, it's owned by a user
	 * process and will be cleaned up when the failure is passed back on
	 * the current or next system call.  Our system call implementations
	 * must therefore check for error and state changes on entry and when
	 * being awoken.
	 */
	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		if (pkt->u.mode) {
			vsk = vsock_sk(sk);

			vsk->peer_shutdown |= pkt->u.mode;
			sk->sk_state_change(sk);
		}
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		vsk = vsock_sk(sk);
		/* It is possible that we sent our peer a message (e.g a
		 * WAITING_READ) right before we got notified that the peer had
		 * detached. If that happens then we can get a RST pkt back
		 * from our peer even though there is data available for us to
		 * read. In that case, don't shutdown the socket completely but
		 * instead allow the local client to finish reading data off
		 * the queuepair. Always treat a RST pkt in connected mode like
		 * a clean shutdown.
		 */
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		if (vsock_stream_has_data(vsk) <= 0)
			sk->sk_state = SS_DISCONNECTING;

		sk->sk_state_change(sk);
		break;

	default:
		vsk = vsock_sk(sk);
		vmci_trans(vsk)->notify_ops->handle_notify_pkt(
				sk, pkt, false, NULL, NULL,
				&pkt_processed);
		if (!pkt_processed)
			return -EINVAL;

		break;
	}

	return 0;
}

/* Allocate and initialize the per-socket transport state, inheriting queue
 * pair size limits from the parent socket (if any) or the module defaults.
 */
static int vmci_transport_socket_init(struct vsock_sock *vsk,
				      struct vsock_sock *psk)
{
	vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
	if (!vsk->trans)
		return -ENOMEM;

	vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
	vmci_trans(vsk)->qpair = NULL;
	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
	vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
		VMCI_INVALID_ID;
	vmci_trans(vsk)->notify_ops = NULL;

	if (psk) {
		vmci_trans(vsk)->queue_pair_size =
			vmci_trans(psk)->queue_pair_size;
		vmci_trans(vsk)->queue_pair_min_size =
			vmci_trans(psk)->queue_pair_min_size;
		vmci_trans(vsk)->queue_pair_max_size =
			vmci_trans(psk)->queue_pair_max_size;
	} else {
		vmci_trans(vsk)->queue_pair_size =
			VMCI_TRANSPORT_DEFAULT_QP_SIZE;
		vmci_trans(vsk)->queue_pair_min_size =
			 VMCI_TRANSPORT_DEFAULT_QP_SIZE_MIN;
		vmci_trans(vsk)->queue_pair_max_size =
			VMCI_TRANSPORT_DEFAULT_QP_SIZE_MAX;
	}

	return 0;
}

/* Undo vmci_transport_socket_init(): unsubscribe events, detach the queue
 * pair and free the transport state.
 */
static void vmci_transport_destruct(struct vsock_sock *vsk)
{
	if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
		vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
		vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
	}

	if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
		vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
		vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
	}

	if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
		vmci_qpair_detach(&vmci_trans(vsk)->qpair);
		vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
		vmci_trans(vsk)->produce_size = 0;
		vmci_trans(vsk)->consume_size = 0;
	}

	if (vmci_trans(vsk)->notify_ops)
vmci_trans(vsk)->notify_ops->socket_destruct(vsk); kfree(vsk->trans); vsk->trans = NULL; } static void vmci_transport_release(struct vsock_sock *vsk) { if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) { vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle); vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE; } } static int vmci_transport_dgram_bind(struct vsock_sock *vsk, struct sockaddr_vm *addr) { u32 port; u32 flags; int err; /* VMCI will select a resource ID for us if we provide * VMCI_INVALID_ID. */ port = addr->svm_port == VMADDR_PORT_ANY ? VMCI_INVALID_ID : addr->svm_port; if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; flags = addr->svm_cid == VMADDR_CID_ANY ? VMCI_FLAG_ANYCID_DG_HND : 0; err = vmci_transport_datagram_create_hnd(port, flags, vmci_transport_recv_dgram_cb, &vsk->sk, &vmci_trans(vsk)->dg_handle); if (err < VMCI_SUCCESS) return vmci_transport_error_to_vsock_error(err); vsock_addr_init(&vsk->local_addr, addr->svm_cid, vmci_trans(vsk)->dg_handle.resource); return 0; } static int vmci_transport_dgram_enqueue( struct vsock_sock *vsk, struct sockaddr_vm *remote_addr, struct msghdr *msg, size_t len) { int err; struct vmci_datagram *dg; if (len > VMCI_MAX_DG_PAYLOAD_SIZE) return -EMSGSIZE; if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid)) return -EPERM; /* Allocate a buffer for the user's message and our packet header. 
*/ dg = kmalloc(len + sizeof(*dg), GFP_KERNEL); if (!dg) return -ENOMEM; memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len); dg->dst = vmci_make_handle(remote_addr->svm_cid, remote_addr->svm_port); dg->src = vmci_make_handle(vsk->local_addr.svm_cid, vsk->local_addr.svm_port); dg->payload_size = len; err = vmci_datagram_send(dg); kfree(dg); if (err < 0) return vmci_transport_error_to_vsock_error(err); return err - sizeof(*dg); } static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags) { int err; int noblock; struct vmci_datagram *dg; size_t payload_len; struct sk_buff *skb; noblock = flags & MSG_DONTWAIT; if (flags & MSG_OOB || flags & MSG_ERRQUEUE) return -EOPNOTSUPP; /* Retrieve the head sk_buff from the socket's receive queue. */ err = 0; skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); if (err) return err; if (!skb) return -EAGAIN; dg = (struct vmci_datagram *)skb->data; if (!dg) /* err is 0, meaning we read zero bytes. */ goto out; payload_len = dg->payload_size; /* Ensure the sk_buff matches the payload size claimed in the packet. */ if (payload_len != skb->len - sizeof(*dg)) { err = -EINVAL; goto out; } if (payload_len > len) { payload_len = len; msg->msg_flags |= MSG_TRUNC; } /* Place the datagram payload in the user's iovec. */ err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len); if (err) goto out; if (msg->msg_name) { /* Provide the address of the sender. */ DECLARE_SOCKADDR(struct sockaddr_vm *, vm_addr, msg->msg_name); vsock_addr_init(vm_addr, dg->src.context, dg->src.resource); msg->msg_namelen = sizeof(*vm_addr); } err = payload_len; out: skb_free_datagram(&vsk->sk, skb); return err; } static bool vmci_transport_dgram_allow(u32 cid, u32 port) { if (cid == VMADDR_CID_HYPERVISOR) { /* Registrations of PBRPC Servers do not modify VMX/Hypervisor * state and are allowed. 
*/ return port == VMCI_UNITY_PBRPC_REGISTER; } return true; } static int vmci_transport_connect(struct vsock_sock *vsk) { int err; bool old_pkt_proto = false; struct sock *sk = &vsk->sk; if (vmci_transport_old_proto_override(&old_pkt_proto) && old_pkt_proto) { err = vmci_transport_send_conn_request( sk, vmci_trans(vsk)->queue_pair_size); if (err < 0) { sk->sk_state = SS_UNCONNECTED; return err; } } else { int supported_proto_versions = vmci_transport_new_proto_supported_versions(); err = vmci_transport_send_conn_request2( sk, vmci_trans(vsk)->queue_pair_size, supported_proto_versions); if (err < 0) { sk->sk_state = SS_UNCONNECTED; return err; } vsk->sent_request = true; } return err; } static ssize_t vmci_transport_stream_dequeue( struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags) { if (flags & MSG_PEEK) return vmci_qpair_peekv(vmci_trans(vsk)->qpair, msg, len, 0); else return vmci_qpair_dequev(vmci_trans(vsk)->qpair, msg, len, 0); } static ssize_t vmci_transport_stream_enqueue( struct vsock_sock *vsk, struct msghdr *msg, size_t len) { return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0); } static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk) { return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair); } static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk) { return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair); } static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk) { return vmci_trans(vsk)->consume_size; } static bool vmci_transport_stream_is_active(struct vsock_sock *vsk) { return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle); } static u64 vmci_transport_get_buffer_size(struct vsock_sock *vsk) { return vmci_trans(vsk)->queue_pair_size; } static u64 vmci_transport_get_min_buffer_size(struct vsock_sock *vsk) { return vmci_trans(vsk)->queue_pair_min_size; } static u64 vmci_transport_get_max_buffer_size(struct vsock_sock *vsk) { return vmci_trans(vsk)->queue_pair_max_size; } static void 
vmci_transport_set_buffer_size(struct vsock_sock *vsk, u64 val) { if (val < vmci_trans(vsk)->queue_pair_min_size) vmci_trans(vsk)->queue_pair_min_size = val; if (val > vmci_trans(vsk)->queue_pair_max_size) vmci_trans(vsk)->queue_pair_max_size = val; vmci_trans(vsk)->queue_pair_size = val; } static void vmci_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val) { if (val > vmci_trans(vsk)->queue_pair_size) vmci_trans(vsk)->queue_pair_size = val; vmci_trans(vsk)->queue_pair_min_size = val; } static void vmci_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val) { if (val < vmci_trans(vsk)->queue_pair_size) vmci_trans(vsk)->queue_pair_size = val; vmci_trans(vsk)->queue_pair_max_size = val; } static int vmci_transport_notify_poll_in( struct vsock_sock *vsk, size_t target, bool *data_ready_now) { return vmci_trans(vsk)->notify_ops->poll_in( &vsk->sk, target, data_ready_now); } static int vmci_transport_notify_poll_out( struct vsock_sock *vsk, size_t target, bool *space_available_now) { return vmci_trans(vsk)->notify_ops->poll_out( &vsk->sk, target, space_available_now); } static int vmci_transport_notify_recv_init( struct vsock_sock *vsk, size_t target, struct vsock_transport_recv_notify_data *data) { return vmci_trans(vsk)->notify_ops->recv_init( &vsk->sk, target, (struct vmci_transport_recv_notify_data *)data); } static int vmci_transport_notify_recv_pre_block( struct vsock_sock *vsk, size_t target, struct vsock_transport_recv_notify_data *data) { return vmci_trans(vsk)->notify_ops->recv_pre_block( &vsk->sk, target, (struct vmci_transport_recv_notify_data *)data); } static int vmci_transport_notify_recv_pre_dequeue( struct vsock_sock *vsk, size_t target, struct vsock_transport_recv_notify_data *data) { return vmci_trans(vsk)->notify_ops->recv_pre_dequeue( &vsk->sk, target, (struct vmci_transport_recv_notify_data *)data); } static int vmci_transport_notify_recv_post_dequeue( struct vsock_sock *vsk, size_t target, ssize_t copied, bool data_read, struct 
vsock_transport_recv_notify_data *data) { return vmci_trans(vsk)->notify_ops->recv_post_dequeue( &vsk->sk, target, copied, data_read, (struct vmci_transport_recv_notify_data *)data); } static int vmci_transport_notify_send_init( struct vsock_sock *vsk, struct vsock_transport_send_notify_data *data) { return vmci_trans(vsk)->notify_ops->send_init( &vsk->sk, (struct vmci_transport_send_notify_data *)data); } static int vmci_transport_notify_send_pre_block( struct vsock_sock *vsk, struct vsock_transport_send_notify_data *data) { return vmci_trans(vsk)->notify_ops->send_pre_block( &vsk->sk, (struct vmci_transport_send_notify_data *)data); } static int vmci_transport_notify_send_pre_enqueue( struct vsock_sock *vsk, struct vsock_transport_send_notify_data *data) { return vmci_trans(vsk)->notify_ops->send_pre_enqueue( &vsk->sk, (struct vmci_transport_send_notify_data *)data); } static int vmci_transport_notify_send_post_enqueue( struct vsock_sock *vsk, ssize_t written, struct vsock_transport_send_notify_data *data) { return vmci_trans(vsk)->notify_ops->send_post_enqueue( &vsk->sk, written, (struct vmci_transport_send_notify_data *)data); } static bool vmci_transport_old_proto_override(bool *old_pkt_proto) { if (PROTOCOL_OVERRIDE != -1) { if (PROTOCOL_OVERRIDE == 0) *old_pkt_proto = true; else *old_pkt_proto = false; pr_info("Proto override in use\n"); return true; } return false; } static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto, bool old_pkt_proto) { struct vsock_sock *vsk = vsock_sk(sk); if (old_pkt_proto) { if (*proto != VSOCK_PROTO_INVALID) { pr_err("Can't set both an old and new protocol\n"); return false; } vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops; goto exit; } switch (*proto) { case VSOCK_PROTO_PKT_ON_NOTIFY: vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_q_state_ops; break; default: pr_err("Unknown notify protocol version\n"); return false; } exit: vmci_trans(vsk)->notify_ops->socket_init(sk); return 
true; } static u16 vmci_transport_new_proto_supported_versions(void) { if (PROTOCOL_OVERRIDE != -1) return PROTOCOL_OVERRIDE; return VSOCK_PROTO_ALL_SUPPORTED; } static u32 vmci_transport_get_local_cid(void) { return vmci_get_context_id(); } static struct vsock_transport vmci_transport = { .init = vmci_transport_socket_init, .destruct = vmci_transport_destruct, .release = vmci_transport_release, .connect = vmci_transport_connect, .dgram_bind = vmci_transport_dgram_bind, .dgram_dequeue = vmci_transport_dgram_dequeue, .dgram_enqueue = vmci_transport_dgram_enqueue, .dgram_allow = vmci_transport_dgram_allow, .stream_dequeue = vmci_transport_stream_dequeue, .stream_enqueue = vmci_transport_stream_enqueue, .stream_has_data = vmci_transport_stream_has_data, .stream_has_space = vmci_transport_stream_has_space, .stream_rcvhiwat = vmci_transport_stream_rcvhiwat, .stream_is_active = vmci_transport_stream_is_active, .stream_allow = vmci_transport_stream_allow, .notify_poll_in = vmci_transport_notify_poll_in, .notify_poll_out = vmci_transport_notify_poll_out, .notify_recv_init = vmci_transport_notify_recv_init, .notify_recv_pre_block = vmci_transport_notify_recv_pre_block, .notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue, .notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue, .notify_send_init = vmci_transport_notify_send_init, .notify_send_pre_block = vmci_transport_notify_send_pre_block, .notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue, .notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue, .shutdown = vmci_transport_shutdown, .set_buffer_size = vmci_transport_set_buffer_size, .set_min_buffer_size = vmci_transport_set_min_buffer_size, .set_max_buffer_size = vmci_transport_set_max_buffer_size, .get_buffer_size = vmci_transport_get_buffer_size, .get_min_buffer_size = vmci_transport_get_min_buffer_size, .get_max_buffer_size = vmci_transport_get_max_buffer_size, .get_local_cid = vmci_transport_get_local_cid, }; static 
int __init vmci_transport_init(void) { int err; /* Create the datagram handle that we will use to send and receive all * VSocket control messages for this context. */ err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID, VMCI_FLAG_ANYCID_DG_HND, vmci_transport_recv_stream_cb, NULL, &vmci_transport_stream_handle); if (err < VMCI_SUCCESS) { pr_err("Unable to create datagram handle. (%d)\n", err); return vmci_transport_error_to_vsock_error(err); } err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED, vmci_transport_qp_resumed_cb, NULL, &vmci_transport_qp_resumed_sub_id); if (err < VMCI_SUCCESS) { pr_err("Unable to subscribe to resumed event. (%d)\n", err); err = vmci_transport_error_to_vsock_error(err); vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID; goto err_destroy_stream_handle; } err = vsock_core_init(&vmci_transport); if (err < 0) goto err_unsubscribe; return 0; err_unsubscribe: vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id); err_destroy_stream_handle: vmci_datagram_destroy_handle(vmci_transport_stream_handle); return err; } module_init(vmci_transport_init); static void __exit vmci_transport_exit(void) { if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) { if (vmci_datagram_destroy_handle( vmci_transport_stream_handle) != VMCI_SUCCESS) pr_err("Couldn't destroy datagram handle\n"); vmci_transport_stream_handle = VMCI_INVALID_HANDLE; } if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) { vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id); vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID; } vsock_core_exit(); } module_exit(vmci_transport_exit); MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("vmware_vsock"); MODULE_ALIAS_NETPROTO(PF_VSOCK);
gpl-2.0
raboof/linux
drivers/video/backlight/ltv350qv.c
131
8063
/*
 * Power control for Samsung LTV350QV Quarter VGA LCD Panel
 *
 * Copyright (C) 2006, 2007 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "ltv350qv.h"

/* FB_BLANK_UNBLANK/NORMAL count as "on"; VSYNC/HSYNC/POWERDOWN as "off". */
#define POWER_IS_ON(pwr)	((pwr) <= FB_BLANK_NORMAL)

struct ltv350qv {
	struct spi_device	*spi;	/* SPI link to the panel controller */
	u8			*buffer;	/* 8-byte DMA-safe scratch for SPI xfers */
	int			power;	/* current FB_BLANK_* power state */
	struct lcd_device	*ld;	/* registered LCD class device */
};

/*
 * The power-on and power-off sequences are taken from the
 * LTV350QV-F04 data sheet from Samsung. The register definitions are
 * taken from the S6F2002 command list also from Samsung. Both
 * documents are distributed with the AVR32 Linux BSP CD from Atmel.
 *
 * There's still some voodoo going on here, but it's a lot better than
 * in the first incarnation of the driver where all we had was the raw
 * numbers from the initialization sequence.
 */

/* Write a 16-bit value to a panel register: one 3-byte SPI transfer for
 * the register index, a second for the data word.  Returns spi_sync()'s
 * status (0 on success).
 */
static int ltv350qv_write_reg(struct ltv350qv *lcd, u8 reg, u16 val)
{
	struct spi_message msg;
	struct spi_transfer index_xfer = {
		.len		= 3,
		.cs_change	= 1,	/* deassert CS between index and data */
	};
	struct spi_transfer value_xfer = {
		.len		= 3,
	};

	spi_message_init(&msg);

	/* register index */
	lcd->buffer[0] = LTV_OPC_INDEX;
	lcd->buffer[1] = 0x00;
	lcd->buffer[2] = reg & 0x7f;
	index_xfer.tx_buf = lcd->buffer;
	spi_message_add_tail(&index_xfer, &msg);

	/* register value */
	lcd->buffer[4] = LTV_OPC_DATA;
	lcd->buffer[5] = val >> 8;
	lcd->buffer[6] = val;
	value_xfer.tx_buf = lcd->buffer + 4;
	spi_message_add_tail(&value_xfer, &msg);

	return spi_sync(lcd->spi, &msg);
}

/* The comments are taken straight from the data sheet */
static int ltv350qv_power_on(struct ltv350qv *lcd)
{
	int ret;

	/* Power On Reset Display off State */
	if (ltv350qv_write_reg(lcd, LTV_PWRCTL1, 0x0000))
		goto err;
	msleep(15);

	/* Power Setting Function 1 */
	if (ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE))
		goto err;
	if (ltv350qv_write_reg(lcd, LTV_PWRCTL2, LTV_VCOML_ENABLE))
		goto err_power1;

	/* Power Setting Function 2 */
	if (ltv350qv_write_reg(lcd, LTV_PWRCTL1,
			       LTV_VCOM_DISABLE | LTV_DRIVE_CURRENT(5)
			       | LTV_SUPPLY_CURRENT(5)))
		goto err_power2;

	msleep(55);

	/* Instruction Setting */
	ret = ltv350qv_write_reg(lcd, LTV_IFCTL,
				 LTV_NMD | LTV_REV | LTV_NL(0x1d));
	ret |= ltv350qv_write_reg(lcd, LTV_DATACTL,
				  LTV_DS_SAME | LTV_CHS_480
				  | LTV_DF_RGB | LTV_RGB_BGR);
	ret |= ltv350qv_write_reg(lcd, LTV_ENTRY_MODE,
				  LTV_VSPL_ACTIVE_LOW
				  | LTV_HSPL_ACTIVE_LOW
				  | LTV_DPL_SAMPLE_RISING
				  | LTV_EPL_ACTIVE_LOW
				  | LTV_SS_RIGHT_TO_LEFT);
	ret |= ltv350qv_write_reg(lcd, LTV_GATECTL1, LTV_CLW(3));
	ret |= ltv350qv_write_reg(lcd, LTV_GATECTL2,
				  LTV_NW_INV_1LINE | LTV_FWI(3));
	ret |= ltv350qv_write_reg(lcd, LTV_VBP, 0x000a);
	ret |= ltv350qv_write_reg(lcd, LTV_HBP, 0x0021);
	ret |= ltv350qv_write_reg(lcd, LTV_SOTCTL, LTV_SDT(3) | LTV_EQ(0));
	/* Panel gamma correction table, values straight from the data sheet. */
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(0), 0x0103);
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(1), 0x0301);
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(2), 0x1f0f);
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(3), 0x1f0f);
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(4), 0x0707);
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(5), 0x0307);
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(6), 0x0707);
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(7), 0x0000);
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(8), 0x0004);
	ret |= ltv350qv_write_reg(lcd, LTV_GAMMA(9), 0x0000);
	if (ret)
		goto err_settings;

	/* Wait more than 2 frames */
	msleep(20);

	/* Display On Sequence */
	ret = ltv350qv_write_reg(lcd, LTV_PWRCTL1,
				 LTV_VCOM_DISABLE
				 | LTV_VCOMOUT_ENABLE
				 | LTV_POWER_ON
				 | LTV_DRIVE_CURRENT(5)
				 | LTV_SUPPLY_CURRENT(5));
	ret |= ltv350qv_write_reg(lcd, LTV_GATECTL2,
				  LTV_NW_INV_1LINE
				  | LTV_DSC
				  | LTV_FWI(3));
	if (ret)
		goto err_disp_on;

	/* Display should now be ON. Phew. */
	return 0;

err_disp_on:
	/*
	 * Try to recover. Error handling probably isn't very useful
	 * at this point, just make a best effort to switch the panel
	 * off.
	 */
	ltv350qv_write_reg(lcd, LTV_PWRCTL1,
			   LTV_VCOM_DISABLE
			   | LTV_DRIVE_CURRENT(5)
			   | LTV_SUPPLY_CURRENT(5));
	ltv350qv_write_reg(lcd, LTV_GATECTL2,
			   LTV_NW_INV_1LINE | LTV_FWI(3));
err_settings:
err_power2:
err_power1:
	ltv350qv_write_reg(lcd, LTV_PWRCTL2, 0x0000);
	msleep(1);
err:
	ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE);
	return -EIO;
}

static int ltv350qv_power_off(struct ltv350qv *lcd)
{
	int ret;

	/* Display Off Sequence */
	ret = ltv350qv_write_reg(lcd, LTV_PWRCTL1,
				 LTV_VCOM_DISABLE
				 | LTV_DRIVE_CURRENT(5)
				 | LTV_SUPPLY_CURRENT(5));
	ret |= ltv350qv_write_reg(lcd, LTV_GATECTL2,
				  LTV_NW_INV_1LINE | LTV_FWI(3));

	/* Power down setting 1 */
	ret |= ltv350qv_write_reg(lcd, LTV_PWRCTL2, 0x0000);

	/* Wait at least 1 ms */
	msleep(1);

	/* Power down setting 2 */
	ret |= ltv350qv_write_reg(lcd, LTV_PWRCTL1, LTV_VCOM_DISABLE);

	/*
	 * No point in trying to recover here. If we can't switch the
	 * panel off, what are we supposed to do other than inform the
	 * user about the failure?
	 */
	if (ret)
		return -EIO;

	/* Display power should now be OFF */
	return 0;
}

/* Transition between FB_BLANK states, running the full panel power
 * sequence only when crossing the on/off boundary.
 */
static int ltv350qv_power(struct ltv350qv *lcd, int power)
{
	int ret = 0;

	if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
		ret = ltv350qv_power_on(lcd);
	else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
		ret = ltv350qv_power_off(lcd);

	if (!ret)
		lcd->power = power;

	return ret;
}

static int ltv350qv_set_power(struct lcd_device *ld, int power)
{
	struct ltv350qv *lcd = lcd_get_data(ld);

	return ltv350qv_power(lcd, power);
}

static int ltv350qv_get_power(struct lcd_device *ld)
{
	struct ltv350qv *lcd = lcd_get_data(ld);

	return lcd->power;
}

static struct lcd_ops ltv_ops = {
	.get_power	= ltv350qv_get_power,
	.set_power	= ltv350qv_set_power,
};

static int __devinit ltv350qv_probe(struct spi_device *spi)
{
	struct ltv350qv *lcd;
	struct lcd_device *ld;
	int ret;

	lcd = devm_kzalloc(&spi->dev, sizeof(struct ltv350qv), GFP_KERNEL);
	if (!lcd)
		return -ENOMEM;

	lcd->spi = spi;
	lcd->power = FB_BLANK_POWERDOWN;
	/* Scratch buffer used by ltv350qv_write_reg(); devm-managed. */
	lcd->buffer = devm_kzalloc(&spi->dev, 8, GFP_KERNEL);
	if (!lcd->buffer)
		return -ENOMEM;

	ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
	if (IS_ERR(ld))
		return PTR_ERR(ld);

	lcd->ld = ld;

	ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
	if (ret)
		goto out_unregister;

	dev_set_drvdata(&spi->dev, lcd);

	return 0;

out_unregister:
	lcd_device_unregister(ld);
	return ret;
}

static int __devexit ltv350qv_remove(struct spi_device *spi)
{
	struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);

	ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
	lcd_device_unregister(lcd->ld);

	return 0;
}

#ifdef CONFIG_PM
static int ltv350qv_suspend(struct spi_device *spi, pm_message_t state)
{
	struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);

	return ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
}

static int ltv350qv_resume(struct spi_device *spi)
{
	struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);

	return ltv350qv_power(lcd, FB_BLANK_UNBLANK);
}
#else
#define ltv350qv_suspend	NULL
#define ltv350qv_resume		NULL
#endif

/* Power down all displays on reboot, poweroff or halt */
static void ltv350qv_shutdown(struct spi_device *spi)
{
	struct ltv350qv *lcd = dev_get_drvdata(&spi->dev);

	ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
}

static struct spi_driver ltv350qv_driver = {
	.driver = {
		.name		= "ltv350qv",
		.owner		= THIS_MODULE,
	},

	.probe		= ltv350qv_probe,
	.remove		= __devexit_p(ltv350qv_remove),
	.shutdown	= ltv350qv_shutdown,
	.suspend	= ltv350qv_suspend,
	.resume		= ltv350qv_resume,
};

module_spi_driver(ltv350qv_driver);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_DESCRIPTION("Samsung LTV350QV LCD Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:ltv350qv");
gpl-2.0
that1/android_kernel_asus_tf700t
drivers/net/mlx4/en_netdev.c
387
32620
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

/* ndo_vlan_rx_add_vid: record the VID and push the updated VLAN filter
 * to the port (only when the device and port are up).
 */
static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

}

/* ndo_vlan_rx_kill_vid: inverse of the above — drop the VID from the
 * cached set and from the hardware filter.
 */
static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);
}

/* Pack a 6-byte Ethernet address into a big-endian-ordered u64. */
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

/* ndo_set_mac_address: validate and store the new MAC, then defer the
 * hardware update to the mac_task workqueue item.
 */
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

/* Workqueue handler that actually programs the new MAC into hardware. */
static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_replace_mac(mdev->dev, priv->port,
				       priv->base_qpn, priv->mac, 0);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while "
				 "registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

/* Drop the cached copy of the device's multicast address list. */
static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	kfree(priv->mc_addrs);
	priv->mc_addrs_cnt = 0;
}

/* Snapshot the netdev multicast list into a flat array so it can be
 * walked later without holding the tx lock.  Best-effort: on allocation
 * failure the old cache is simply left in place.
 */
static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	char *mc_addrs;
	int mc_addrs_cnt = netdev_mc_count(dev);
	int i;

	mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
	if (!mc_addrs) {
		en_err(priv, "failed to allocate multicast list\n");
		return;
	}
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
	priv->mc_addrs = mc_addrs;
	priv->mc_addrs_cnt = mc_addrs_cnt;
}

/* ndo_set_rx_mode: defer the (sleeping) filter reconfiguration to the
 * mcast_task work item.
 */
static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

/* Workqueue handler applying promiscuous/allmulti/multicast filter state
 * to the port under mdev->state_lock.
 */
static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, "
				 "ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, "
				 "ignoring  multicast change.\n");
		goto out;
	}

	/*
	 * Promsicuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscouos mode */
			if (!(mdev->dev->caps.flags &
						MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
				err = mlx4_SET_PORT_qpn_calc(mdev->dev,
							     priv->port,
							     priv->base_qpn,
							     1);
			else
				err = mlx4_unicast_promisc_add(mdev->dev,
							       priv->base_qpn,
							       priv->port);
			if (err)
				en_err(priv, "Failed enabling "
					     "promiscuous mode\n");

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling "
					     "multicast filter\n");

			/* Add the default qp number as multicast promisc */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed entering multicast promisc mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscouos mode */
		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 0);
		else
			err = mlx4_unicast_promisc_remove(mdev->dev,
							  priv->base_qpn,
							  priv->port);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
							 priv->port);
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		int i;
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Detach our qp from all the multicast addresses */
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN,
			       ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_detach(mdev->dev,
					      &priv->rss_map.indir_qp,
					      mc_list, MLX4_PROT_ETH);
		}
		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphor */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			mcast_addr =
			      mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN,
			       ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_attach(mdev->dev,
					      &priv->rss_map.indir_qp,
					      mc_list, 0, MLX4_PROT_ETH);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll all RX completion queues with interrupts effectively disabled
 * (netconsole / netpoll path).
 */
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

/* ndo_tx_timeout: count the event and schedule the watchdog to restart
 * the port.
 */
static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}


static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coelsing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
			   "rx_frames:%d rx_usecs:%d\n",
		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
		cq->moder_time = MLX4_EN_TX_COAL_TIME;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_time = MLX4_EN_AUTO_CONF;
	priv->last_moder_jiffies = 0;
	priv->last_moder_packets = 0;
	priv->last_moder_tx_packets = 0;
	priv->last_moder_bytes = 0;
}

/* Adaptive interrupt moderation: periodically re-derive the RX CQ
 * moderation time from the observed packet rate and average packet size.
 */
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long tx_packets;
	unsigned long tx_pkt_diff;
	unsigned long rx_pkt_diff;
	int moder_time;
	int i, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	spin_lock_bh(&priv->stats_lock);
	rx_packets = priv->stats.rx_packets;
	rx_bytes = priv->stats.rx_bytes;
	tx_packets = priv->stats.tx_packets;
	spin_unlock_bh(&priv->stats_lock);

	if (!priv->last_moder_jiffies || !period)
		goto out;

	tx_pkt_diff = ((unsigned long) (tx_packets -
					priv->last_moder_tx_packets));
	rx_pkt_diff = ((unsigned long) (rx_packets -
					priv->last_moder_packets));
	packets = max(tx_pkt_diff, rx_pkt_diff);
	rate = packets * HZ / period;
	avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				 priv->last_moder_bytes)) / packets : 0;

	/* Apply auto-moderation only when packet rate exceeds a rate that
	 * it matters */
	if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
		/* If tx and rx packet rates are not balanced, assume that
		 * traffic is mainly BW bound and apply maximum moderation.
		 * Otherwise, moderate according to packet rate */
		if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
		    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
			moder_time = priv->rx_usecs_high;
		} else {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				/* Linear interpolation between the low and
				 * high bounds. */
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		}
	} else {
		moder_time = priv->rx_usecs_low;
	}

	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
	       tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);

	en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
	       "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
	       priv->last_moder_time, moder_time, period, packets,
	       avg_pkt_size, rate);

	if (moder_time != priv->last_moder_time) {
		priv->last_moder_time = moder_time;
		for (i = 0; i < priv->rx_ring_num; i++) {
			cq = &priv->rx_cq[i];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err) {
				en_err(priv, "Failed modifying moderation for cq:%d\n", i);
				break;
			}
		}
	}

out:
	priv->last_moder_packets = rx_packets;
	priv->last_moder_tx_packets = tx_packets;
	priv->last_moder_bytes = rx_bytes;
	priv->last_moder_jiffies = jiffies;
}

/* Periodic stats work: dump HW counters, run auto-moderation, and
 * re-register the MAC if the device core removed it (e.g. after reset).
 */
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		en_dbg(HW, priv, "Could not update stats\n");

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* Work item translating device link events into netif carrier state. */
static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}


int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};
	char name[32];

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
mlx4_en_deactivate_cq(priv, cq); goto cq_err; } mlx4_en_arm_cq(priv, cq); priv->rx_ring[i].cqn = cq->mcq.cqn; ++rx_index; } /* Set port mac number */ en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); err = mlx4_register_mac(mdev->dev, priv->port, priv->mac, &priv->base_qpn, 0); if (err) { en_err(priv, "Failed setting port mac\n"); goto cq_err; } mdev->mac_removed[priv->port] = 0; err = mlx4_en_config_rss_steer(priv); if (err) { en_err(priv, "Failed configuring rss steering\n"); goto mac_err; } if (mdev->dev->caps.comp_pool && !priv->tx_vector) { sprintf(name , "%s-tx", priv->dev->name); if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) { mlx4_warn(mdev, "Failed Assigning an EQ to " "%s_tx ,Falling back to legacy " "EQ's\n", priv->dev->name); } } /* Configure tx cq's and rings */ for (i = 0; i < priv->tx_ring_num; i++) { /* Configure cq */ cq = &priv->tx_cq[i]; cq->vector = priv->tx_vector; err = mlx4_en_activate_cq(priv, cq); if (err) { en_err(priv, "Failed allocating Tx CQ\n"); goto tx_err; } err = mlx4_en_set_cq_moder(priv, cq); if (err) { en_err(priv, "Failed setting cq moderation parameters"); mlx4_en_deactivate_cq(priv, cq); goto tx_err; } en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); cq->buf->wqe_index = cpu_to_be16(0xffff); /* Configure ring */ tx_ring = &priv->tx_ring[i]; err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn); if (err) { en_err(priv, "Failed allocating Tx ring\n"); mlx4_en_deactivate_cq(priv, cq); goto tx_err; } /* Set initial ownership of all Tx TXBBs to SW (1) */ for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) *((u32 *) (tx_ring->buf + j)) = 0xffffffff; ++tx_index; } /* Configure port */ err = mlx4_SET_PORT_general(mdev->dev, priv->port, priv->rx_skb_size + ETH_FCS_LEN, priv->prof->tx_pause, priv->prof->tx_ppp, priv->prof->rx_pause, priv->prof->rx_ppp); if (err) { en_err(priv, "Failed setting port general configurations " "for port %d, with error %d\n", priv->port, err); goto tx_err; } /* 
Set default qp number */ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); if (err) { en_err(priv, "Failed setting default qp numbers\n"); goto tx_err; } /* Init port */ en_dbg(HW, priv, "Initializing port\n"); err = mlx4_INIT_PORT(mdev->dev, priv->port); if (err) { en_err(priv, "Failed Initializing port\n"); goto tx_err; } /* Attach rx QP to bradcast address */ memset(&mc_list[10], 0xff, ETH_ALEN); mc_list[5] = priv->port; if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 0, MLX4_PROT_ETH)) mlx4_warn(mdev, "Failed Attaching Broadcast\n"); /* Must redo promiscuous mode setup. */ priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->mcast_task); priv->port_up = true; netif_tx_start_all_queues(dev); return 0; tx_err: while (tx_index--) { mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]); } mlx4_en_release_rss_steer(priv); mac_err: mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); cq_err: while (rx_index--) mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); for (i = 0; i < priv->rx_ring_num; i++) mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); return err; /* need to close devices */ } void mlx4_en_stop_port(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int i; u8 mc_list[16] = {0}; if (!priv->port_up) { en_dbg(DRV, priv, "stop port called while port already down\n"); return; } /* Synchronize with tx routine */ netif_tx_lock_bh(dev); netif_tx_stop_all_queues(dev); netif_tx_unlock_bh(dev); /* Set port as not active */ priv->port_up = false; /* Detach All multicasts */ memset(&mc_list[10], 0xff, ETH_ALEN); mc_list[5] = priv->port; mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH); for (i = 0; i < priv->mc_addrs_cnt; i++) { memcpy(&mc_list[10], 
priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); mc_list[5] = priv->port; mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH); } mlx4_en_clear_list(dev); /* Flush multicast filter */ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); /* Unregister Mac address for the port */ mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn); mdev->mac_removed[priv->port] = 1; /* Free TX Rings */ for (i = 0; i < priv->tx_ring_num; i++) { mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]); mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]); } msleep(10); for (i = 0; i < priv->tx_ring_num; i++) mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]); /* Free RSS qps */ mlx4_en_release_rss_steer(priv); /* Free RX Rings */ for (i = 0; i < priv->rx_ring_num; i++) { mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state)) msleep(1); mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]); } /* close port*/ mlx4_CLOSE_PORT(mdev->dev, priv->port); } static void mlx4_en_restart(struct work_struct *work) { struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, watchdog_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); mutex_lock(&mdev->state_lock); if (priv->port_up) { mlx4_en_stop_port(dev); if (mlx4_en_start_port(dev)) en_err(priv, "Failed restarting port %d\n", priv->port); } mutex_unlock(&mdev->state_lock); } static int mlx4_en_open(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int i; int err = 0; mutex_lock(&mdev->state_lock); if (!mdev->device_up) { en_err(priv, "Cannot open - device down/disabled\n"); err = -EBUSY; goto out; } /* Reset HW statistics and performance counters */ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) en_dbg(HW, priv, "Failed dumping statistics\n"); memset(&priv->stats, 0, sizeof(priv->stats)); 
memset(&priv->pstats, 0, sizeof(priv->pstats)); for (i = 0; i < priv->tx_ring_num; i++) { priv->tx_ring[i].bytes = 0; priv->tx_ring[i].packets = 0; } for (i = 0; i < priv->rx_ring_num; i++) { priv->rx_ring[i].bytes = 0; priv->rx_ring[i].packets = 0; } err = mlx4_en_start_port(dev); if (err) en_err(priv, "Failed starting port:%d\n", priv->port); out: mutex_unlock(&mdev->state_lock); return err; } static int mlx4_en_close(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; en_dbg(IFDOWN, priv, "Close port called\n"); mutex_lock(&mdev->state_lock); mlx4_en_stop_port(dev); netif_carrier_off(dev); mutex_unlock(&mdev->state_lock); return 0; } void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors) { int i; for (i = 0; i < priv->tx_ring_num; i++) { if (priv->tx_ring[i].tx_info) mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); if (priv->tx_cq[i].buf) mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors); } for (i = 0; i < priv->rx_ring_num; i++) { if (priv->rx_ring[i].rx_info) mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); if (priv->rx_cq[i].buf) mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors); } } int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) { struct mlx4_en_port_profile *prof = priv->prof; int i; int base_tx_qpn, err; err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn); if (err) { en_err(priv, "failed reserving range for TX rings\n"); return err; } /* Create tx Rings */ for (i = 0; i < priv->tx_ring_num; i++) { if (mlx4_en_create_cq(priv, &priv->tx_cq[i], prof->tx_ring_size, i, TX)) goto err; if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i, prof->tx_ring_size, TXBB_SIZE)) goto err; } /* Create rx Rings */ for (i = 0; i < priv->rx_ring_num; i++) { if (mlx4_en_create_cq(priv, &priv->rx_cq[i], prof->rx_ring_size, i, RX)) goto err; if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], prof->rx_ring_size, priv->stride)) 
goto err; } return 0; err: en_err(priv, "Failed to allocate NIC resources\n"); mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num); return -ENOMEM; } void mlx4_en_destroy_netdev(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); /* Unregister device - this will close the port if it was up */ if (priv->registered) unregister_netdev(dev); if (priv->allocated) mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); cancel_delayed_work(&priv->stats_task); /* flush any pending task for this netdev */ flush_workqueue(mdev->workqueue); /* Detach the netdev so tasks would not attempt to access it */ mutex_lock(&mdev->state_lock); mdev->pndev[priv->port] = NULL; mutex_unlock(&mdev->state_lock); mlx4_en_free_resources(priv, false); free_netdev(dev); } static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; int err = 0; en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", dev->mtu, new_mtu); if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { en_err(priv, "Bad MTU size:%d.\n", new_mtu); return -EPERM; } dev->mtu = new_mtu; if (netif_running(dev)) { mutex_lock(&mdev->state_lock); if (!mdev->device_up) { /* NIC is probably restarting - let watchdog task reset * the port */ en_dbg(DRV, priv, "Change MTU called with card down!?\n"); } else { mlx4_en_stop_port(dev); err = mlx4_en_start_port(dev); if (err) { en_err(priv, "Failed restarting port:%d\n", priv->port); queue_work(mdev->workqueue, &priv->watchdog_task); } } mutex_unlock(&mdev->state_lock); } return 0; } static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, .ndo_start_xmit = mlx4_en_xmit, .ndo_select_queue = mlx4_en_select_queue, .ndo_get_stats = mlx4_en_get_stats, .ndo_set_multicast_list = 
mlx4_en_set_multicast, .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = mlx4_en_change_mtu, .ndo_tx_timeout = mlx4_en_tx_timeout, .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx4_en_netpoll, #endif }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_port_profile *prof) { struct net_device *dev; struct mlx4_en_priv *priv; int i; int err; dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), prof->tx_ring_num, prof->rx_ring_num); if (dev == NULL) { mlx4_err(mdev, "Net device allocation failed\n"); return -ENOMEM; } SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); dev->dev_id = port - 1; /* * Initialize driver private data */ priv = netdev_priv(dev); memset(priv, 0, sizeof(struct mlx4_en_priv)); priv->dev = dev; priv->mdev = mdev; priv->prof = prof; priv->port = port; priv->port_up = false; priv->flags = prof->flags; priv->tx_ring_num = prof->tx_ring_num; priv->rx_ring_num = prof->rx_ring_num; priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); /* Query for default mac and max mtu */ priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; priv->mac = mdev->dev->caps.def_mac[priv->port]; if (ILLEGAL_MAC(priv->mac)) { en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n", priv->port, priv->mac); err = -EINVAL; goto out; } priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + DS_SIZE * MLX4_EN_MAX_RX_FRAGS); err = mlx4_en_alloc_resources(priv); if (err) goto out; /* Allocate page for receive rings */ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE, 
MLX4_EN_PAGE_SIZE); if (err) { en_err(priv, "Failed to allocate page for rx qps\n"); goto out; } priv->allocated = 1; /* * Initialize netdev entry points */ dev->netdev_ops = &mlx4_netdev_ops; dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; netif_set_real_num_tx_queues(dev, priv->tx_ring_num); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); /* Set defualt MAC */ dev->addr_len = ETH_ALEN; for (i = 0; i < ETH_ALEN; i++) { dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i)); dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i)); } /* * Set driver features */ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; if (mdev->LSO_support) dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; dev->vlan_features = dev->hw_features; dev->hw_features |= NETIF_F_RXCSUM; dev->features = dev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; mdev->pndev[port] = dev; netif_carrier_off(dev); err = register_netdev(dev); if (err) { en_err(priv, "Netdev registration failed for port %d\n", port); goto out; } en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); /* Configure port */ err = mlx4_SET_PORT_general(mdev->dev, priv->port, MLX4_EN_MIN_MTU, 0, 0, 0, 0); if (err) { en_err(priv, "Failed setting port general configurations " "for port %d, with error %d\n", priv->port, err); goto out; } /* Init port */ en_warn(priv, "Initializing port\n"); err = mlx4_INIT_PORT(mdev->dev, priv->port); if (err) { en_err(priv, "Failed Initializing port\n"); goto out; } priv->registered = 1; mlx4_en_set_default_moderation(priv); queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); return 0; out: mlx4_en_destroy_netdev(dev); return err; }
gpl-2.0
CyanogenMod/android_kernel_asus_tf201
drivers/staging/gma500/psb_intel_display.c
387
41036
/* * Copyright © 2006-2011 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * Authors: * Eric Anholt <eric@anholt.net> */ #include <linux/i2c.h> #include <linux/pm_runtime.h> #include <drm/drmP.h> #include "framebuffer.h" #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" #include "psb_intel_display.h" #include "power.h" #include "mdfld_output.h" struct psb_intel_clock_t { /* given values */ int n; int m1, m2; int p1, p2; /* derived values */ int dot; int vco; int m; int p; }; struct psb_intel_range_t { int min, max; }; struct psb_intel_p2_t { int dot_limit; int p2_slow, p2_fast; }; #define INTEL_P2_NUM 2 struct psb_intel_limit_t { struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1; struct psb_intel_p2_t p2; }; #define I8XX_DOT_MIN 25000 #define I8XX_DOT_MAX 350000 #define I8XX_VCO_MIN 930000 #define I8XX_VCO_MAX 1400000 #define I8XX_N_MIN 3 #define I8XX_N_MAX 16 #define I8XX_M_MIN 96 #define I8XX_M_MAX 140 #define I8XX_M1_MIN 18 #define I8XX_M1_MAX 26 #define I8XX_M2_MIN 6 #define I8XX_M2_MAX 16 #define I8XX_P_MIN 4 #define I8XX_P_MAX 128 #define I8XX_P1_MIN 2 #define I8XX_P1_MAX 33 #define I8XX_P1_LVDS_MIN 1 #define I8XX_P1_LVDS_MAX 6 #define I8XX_P2_SLOW 4 #define I8XX_P2_FAST 2 #define I8XX_P2_LVDS_SLOW 14 #define I8XX_P2_LVDS_FAST 14 /* No fast option */ #define I8XX_P2_SLOW_LIMIT 165000 #define I9XX_DOT_MIN 20000 #define 
I9XX_DOT_MAX 400000 #define I9XX_VCO_MIN 1400000 #define I9XX_VCO_MAX 2800000 #define I9XX_N_MIN 3 #define I9XX_N_MAX 8 #define I9XX_M_MIN 70 #define I9XX_M_MAX 120 #define I9XX_M1_MIN 10 #define I9XX_M1_MAX 20 #define I9XX_M2_MIN 5 #define I9XX_M2_MAX 9 #define I9XX_P_SDVO_DAC_MIN 5 #define I9XX_P_SDVO_DAC_MAX 80 #define I9XX_P_LVDS_MIN 7 #define I9XX_P_LVDS_MAX 98 #define I9XX_P1_MIN 1 #define I9XX_P1_MAX 8 #define I9XX_P2_SDVO_DAC_SLOW 10 #define I9XX_P2_SDVO_DAC_FAST 5 #define I9XX_P2_SDVO_DAC_SLOW_LIMIT 200000 #define I9XX_P2_LVDS_SLOW 14 #define I9XX_P2_LVDS_FAST 7 #define I9XX_P2_LVDS_SLOW_LIMIT 112000 #define INTEL_LIMIT_I8XX_DVO_DAC 0 #define INTEL_LIMIT_I8XX_LVDS 1 #define INTEL_LIMIT_I9XX_SDVO_DAC 2 #define INTEL_LIMIT_I9XX_LVDS 3 static const struct psb_intel_limit_t psb_intel_limits[] = { { /* INTEL_LIMIT_I8XX_DVO_DAC */ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX}, .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX}, .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX}, .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX}, .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX}, .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX}, .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX}, .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX}, .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST}, }, { /* INTEL_LIMIT_I8XX_LVDS */ .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX}, .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX}, .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX}, .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX}, .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX}, .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX}, .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX}, .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX}, .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT, .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST}, }, { /* INTEL_LIMIT_I9XX_SDVO_DAC */ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, .n = {.min = 
I9XX_N_MIN, .max = I9XX_N_MAX}, .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX}, .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST}, }, { /* INTEL_LIMIT_I9XX_LVDS */ .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX}, .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX}, .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX}, .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX}, .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX}, .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX}, .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX}, /* The single-channel range is 25-112Mhz, and dual-channel * is 80-224Mhz. Prefer single channel as much as possible. */ .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST}, }, }; static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc) { const struct psb_intel_limit_t *limit; if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS]; else limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC]; return limit; } /** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock) { clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; clock->vco = refclk * clock->m / (clock->n + 2); clock->dot = clock->vco / clock->p; } /** Derive the pixel clock for the given refclk and divisors for 9xx chips. 
*/ static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock) { clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); clock->p = clock->p1 * clock->p2; clock->vco = refclk * clock->m / (clock->n + 2); clock->dot = clock->vco / clock->p; } static void psb_intel_clock(struct drm_device *dev, int refclk, struct psb_intel_clock_t *clock) { return i9xx_clock(refclk, clock); } /** * Returns whether any output on the specified pipe is of the specified type */ bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type) { struct drm_device *dev = crtc->dev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *l_entry; list_for_each_entry(l_entry, &mode_config->connector_list, head) { if (l_entry->encoder && l_entry->encoder->crtc == crtc) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(l_entry); if (psb_intel_output->type == type) return true; } } return false; } #define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } /** * Returns whether the given set of divisors are valid for a given refclk with * the given connectors. 
*/ static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc, struct psb_intel_clock_t *clock) { const struct psb_intel_limit_t *limit = psb_intel_limit(crtc); if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) INTELPllInvalid("p1 out of range\n"); if (clock->p < limit->p.min || limit->p.max < clock->p) INTELPllInvalid("p out of range\n"); if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) INTELPllInvalid("m2 out of range\n"); if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) INTELPllInvalid("m1 out of range\n"); if (clock->m1 <= clock->m2) INTELPllInvalid("m1 <= m2\n"); if (clock->m < limit->m.min || limit->m.max < clock->m) INTELPllInvalid("m out of range\n"); if (clock->n < limit->n.min || limit->n.max < clock->n) INTELPllInvalid("n out of range\n"); if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) INTELPllInvalid("vco out of range\n"); /* XXX: We may need to be checking "Dot clock" * depending on the multiplier, connector, etc., * rather than just a single range. */ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) INTELPllInvalid("dot out of range\n"); return true; } /** * Returns a set of divisors for the desired target clock with the given * refclk, or FALSE. The returned values represent the clock equation: * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target, int refclk, struct psb_intel_clock_t *best_clock) { struct drm_device *dev = crtc->dev; struct psb_intel_clock_t clock; const struct psb_intel_limit_t *limit = psb_intel_limit(crtc); int err = target; if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && (REG_READ(LVDS) & LVDS_PORT_EN) != 0) { /* * For LVDS, if the panel is on, just rely on its current * settings for dual-channel. We haven't figured out how to * reliably set up different single/dual channel state, if we * even can. 
*/ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) clock.p2 = limit->p2.p2_fast; else clock.p2 = limit->p2.p2_slow; } else { if (target < limit->p2.dot_limit) clock.p2 = limit->p2.p2_slow; else clock.p2 = limit->p2.p2_fast; } memset(best_clock, 0, sizeof(*best_clock)); for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 && clock.m2 <= limit->m2.max; clock.m2++) { for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; clock.p1++) { int this_err; psb_intel_clock(dev, refclk, &clock); if (!psb_intel_PLL_is_valid (crtc, &clock)) continue; this_err = abs(clock.dot - target); if (this_err < err) { *best_clock = clock; err = this_err; } } } } } return err != target; } void psb_intel_wait_for_vblank(struct drm_device *dev) { /* Wait for 20ms, i.e. one cycle at 50hz. */ mdelay(20); } int psb_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; /* struct drm_i915_master_private *master_priv; */ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb); int pipe = psb_intel_crtc->pipe; unsigned long start, offset; int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE); int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; int dspcntr_reg = (pipe == 0) ? 
DSPACNTR : DSPBCNTR; u32 dspcntr; int ret = 0; if (!gma_power_begin(dev, true)) return 0; /* no fb bound */ if (!crtc->fb) { dev_dbg(dev->dev, "No FB bound\n"); goto psb_intel_pipe_cleaner; } /* We are displaying this buffer, make sure it is actually loaded into the GTT */ ret = psb_gtt_pin(psbfb->gtt); if (ret < 0) goto psb_intel_pipe_set_base_exit; start = psbfb->gtt->offset; offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); REG_WRITE(dspstride, crtc->fb->pitch); dspcntr = REG_READ(dspcntr_reg); dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; switch (crtc->fb->bits_per_pixel) { case 8: dspcntr |= DISPPLANE_8BPP; break; case 16: if (crtc->fb->depth == 15) dspcntr |= DISPPLANE_15_16BPP; else dspcntr |= DISPPLANE_16BPP; break; case 24: case 32: dspcntr |= DISPPLANE_32BPP_NO_ALPHA; break; default: dev_err(dev->dev, "Unknown color depth\n"); ret = -EINVAL; psb_gtt_unpin(psbfb->gtt); goto psb_intel_pipe_set_base_exit; } REG_WRITE(dspcntr_reg, dspcntr); if (0 /* FIXMEAC - check what PSB needs */) { REG_WRITE(dspbase, offset); REG_READ(dspbase); REG_WRITE(dspsurf, start); REG_READ(dspsurf); } else { REG_WRITE(dspbase, start + offset); REG_READ(dspbase); } psb_intel_pipe_cleaner: /* If there was a previous display we can now unpin it */ if (old_fb) psb_gtt_unpin(to_psb_fb(old_fb)->gtt); psb_intel_pipe_set_base_exit: gma_power_end(dev); return ret; } /** * Sets the power management mode of the pipe and plane. * * This code should probably grow support for turning the cursor off and back * on appropriately at the same time as we're turning the pipe off/on. */ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; /* struct drm_i915_master_private *master_priv; */ /* struct drm_i915_private *dev_priv = dev->dev_private; */ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; int dspcntr_reg = (pipe == 0) ? 
DSPACNTR : DSPBCNTR; int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE; int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; u32 temp; bool enabled; /* XXX: When our outputs are all unaware of DPMS modes other than off * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. */ switch (mode) { case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: /* Enable the DPLL */ temp = REG_READ(dpll_reg); if ((temp & DPLL_VCO_ENABLE) == 0) { REG_WRITE(dpll_reg, temp); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); } /* Enable the pipe */ temp = REG_READ(pipeconf_reg); if ((temp & PIPEACONF_ENABLE) == 0) REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE); /* Enable the plane */ temp = REG_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) == 0) { REG_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE); /* Flush the plane changes */ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); } psb_intel_crtc_load_lut(crtc); /* Give the overlay scaler a chance to enable * if it's on this pipe */ /* psb_intel_crtc_dpms_video(crtc, true); TODO */ break; case DRM_MODE_DPMS_OFF: /* Give the overlay scaler a chance to disable * if it's on this pipe */ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */ /* Disable the VGA plane that we never use */ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); /* Disable display plane */ temp = REG_READ(dspcntr_reg); if ((temp & DISPLAY_PLANE_ENABLE) != 0) { REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE); /* Flush the plane changes */ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg)); REG_READ(dspbase_reg); } /* Next, disable display pipes */ temp = REG_READ(pipeconf_reg); if ((temp & PIPEACONF_ENABLE) != 0) { REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE); 
REG_READ(pipeconf_reg); } /* Wait for vblank for the disable to take effect. */ psb_intel_wait_for_vblank(dev); temp = REG_READ(dpll_reg); if ((temp & DPLL_VCO_ENABLE) != 0) { REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE); REG_READ(dpll_reg); } /* Wait for the clocks to turn off. */ udelay(150); break; } enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; /*Set FIFO Watermarks*/ REG_WRITE(DSPARB, 0x3F3E); } static void psb_intel_crtc_prepare(struct drm_crtc *crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); } static void psb_intel_crtc_commit(struct drm_crtc *crtc) { struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); } void psb_intel_encoder_prepare(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; /* lvds has its own version of prepare see psb_intel_lvds_prepare */ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); } void psb_intel_encoder_commit(struct drm_encoder *encoder) { struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; /* lvds has its own version of commit see psb_intel_lvds_commit */ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); } static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } /** * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ static int psb_intel_panel_fitter_pipe(struct drm_device *dev) { u32 pfit_control; pfit_control = REG_READ(PFIT_CONTROL); /* See if the panel fitter is in use */ if ((pfit_control & PFIT_ENABLE) == 0) return -1; /* Must be on PIPE 1 for PSB */ return 1; } static int psb_intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; 
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; int pipe = psb_intel_crtc->pipe; int fp_reg = (pipe == 0) ? FPA0 : FPB0; int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B; int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B; int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B; int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B; int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B; int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B; int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; int refclk; struct psb_intel_clock_t clock; u32 dpll = 0, fp = 0, dspcntr, pipeconf; bool ok, is_sdvo = false, is_dvo = false; bool is_crt = false, is_lvds = false, is_tv = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; /* No scan out no play */ if (crtc->fb == NULL) { crtc_funcs->mode_set_base(crtc, x, y, old_fb); return 0; } list_for_each_entry(connector, &mode_config->connector_list, head) { struct psb_intel_output *psb_intel_output = to_psb_intel_output(connector); if (!connector->encoder || connector->encoder->crtc != crtc) continue; switch (psb_intel_output->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; case INTEL_OUTPUT_SDVO: is_sdvo = true; break; case INTEL_OUTPUT_DVO: is_dvo = true; break; case INTEL_OUTPUT_TVOUT: is_tv = true; break; case INTEL_OUTPUT_ANALOG: is_crt = true; break; } } refclk = 96000; ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); return 0; } fp = clock.n << 16 | clock.m1 << 8 | clock.m2; dpll = DPLL_VGA_MODE_DIS; if (is_lvds) { dpll |= DPLLB_MODE_LVDS; dpll |= DPLL_DVO_HIGH_SPEED; } 
else dpll |= DPLLB_MODE_DAC_SERIAL; if (is_sdvo) { int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; dpll |= DPLL_DVO_HIGH_SPEED; dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; } /* compute bitmask from p1 value */ dpll |= (1 << (clock.p1 - 1)) << 16; switch (clock.p2) { case 5: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; break; case 7: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; break; case 10: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; break; case 14: dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } if (is_tv) { /* XXX: just matching BIOS for now */ /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ dpll |= 3; } dpll |= PLL_REF_INPUT_DREFCLK; /* setup pipeconf */ pipeconf = REG_READ(pipeconf_reg); /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; if (pipe == 0) dspcntr |= DISPPLANE_SEL_PIPE_A; else dspcntr |= DISPPLANE_SEL_PIPE_B; dspcntr |= DISPLAY_PLANE_ENABLE; pipeconf |= PIPEACONF_ENABLE; dpll |= DPLL_VCO_ENABLE; /* Disable the panel fitter if it was on our pipe */ if (psb_intel_panel_fitter_pipe(dev) == pipe) REG_WRITE(PFIT_CONTROL, 0); drm_mode_debug_printmodeline(mode); if (dpll & DPLL_VCO_ENABLE) { REG_WRITE(fp_reg, fp); REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); REG_READ(dpll_reg); udelay(150); } /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. */ if (is_lvds) { u32 lvds = REG_READ(LVDS); lvds &= ~LVDS_PIPEB_SELECT; if (pipe == 1) lvds |= LVDS_PIPEB_SELECT; lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; /* Set the B0-B3 data pairs corresponding to * whether we're going to * set the DPLLs for dual-channel mode or not. */ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); if (clock.p2 == 7) lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) * appropriately here, but we need to look more * thoroughly into how panels behave in the two modes. 
*/ REG_WRITE(LVDS, lvds); REG_READ(LVDS); } REG_WRITE(fp_reg, fp); REG_WRITE(dpll_reg, dpll); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); /* write it again -- the BIOS does, after all */ REG_WRITE(dpll_reg, dpll); REG_READ(dpll_reg); /* Wait for the clocks to stabilize. */ udelay(150); REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16)); REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16)); REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); /* pipesrc and dspsize control the size that is scaled from, * which should always be the user's requested size. */ REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); REG_WRITE(dsppos_reg, 0); REG_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); REG_WRITE(pipeconf_reg, pipeconf); REG_READ(pipeconf_reg); psb_intel_wait_for_vblank(dev); REG_WRITE(dspcntr_reg, dspcntr); /* Flush the plane changes */ crtc_funcs->mode_set_base(crtc, x, y, old_fb); psb_intel_wait_for_vblank(dev); return 0; } /** Loads the palette/gamma unit for the CRTC with the prepared values */ void psb_intel_crtc_load_lut(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int palreg = PALETTE_A; int i; /* The clocks have to be on to load the palette. 
*/ if (!crtc->enabled) return; switch (psb_intel_crtc->pipe) { case 0: break; case 1: palreg = PALETTE_B; break; case 2: palreg = PALETTE_C; break; default: dev_err(dev->dev, "Illegal Pipe Number.\n"); return; } if (gma_power_begin(dev, false)) { for (i = 0; i < 256; i++) { REG_WRITE(palreg + 4 * i, ((psb_intel_crtc->lut_r[i] + psb_intel_crtc->lut_adj[i]) << 16) | ((psb_intel_crtc->lut_g[i] + psb_intel_crtc->lut_adj[i]) << 8) | (psb_intel_crtc->lut_b[i] + psb_intel_crtc->lut_adj[i])); } gma_power_end(dev); } else { for (i = 0; i < 256; i++) { dev_priv->save_palette_a[i] = ((psb_intel_crtc->lut_r[i] + psb_intel_crtc->lut_adj[i]) << 16) | ((psb_intel_crtc->lut_g[i] + psb_intel_crtc->lut_adj[i]) << 8) | (psb_intel_crtc->lut_b[i] + psb_intel_crtc->lut_adj[i]); } } } /** * Save HW states of giving crtc */ static void psb_intel_crtc_save(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; /* struct drm_psb_private *dev_priv = (struct drm_psb_private *)dev->dev_private; */ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; int pipeA = (psb_intel_crtc->pipe == 0); uint32_t paletteReg; int i; if (!crtc_state) { dev_err(dev->dev, "No CRTC state found\n"); return; } crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR); crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF); crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC); crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0); crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1); crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B); crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B); crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B); crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B); crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B); crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B); crtc_state->saveVSYNC = REG_READ(pipeA ? 
VSYNC_A : VSYNC_B); crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE); /*NOTE: DSPSIZE DSPPOS only for psb*/ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE); crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS); crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE); paletteReg = pipeA ? PALETTE_A : PALETTE_B; for (i = 0; i < 256; ++i) crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2)); } /** * Restore HW states of giving crtc */ static void psb_intel_crtc_restore(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; /* struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private; */ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state; /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */ int pipeA = (psb_intel_crtc->pipe == 0); uint32_t paletteReg; int i; if (!crtc_state) { dev_err(dev->dev, "No crtc state\n"); return; } if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) { REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL & ~DPLL_VCO_ENABLE); REG_READ(pipeA ? DPLL_A : DPLL_B); udelay(150); } REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0); REG_READ(pipeA ? FPA0 : FPB0); REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1); REG_READ(pipeA ? FPA1 : FPB1); REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL); REG_READ(pipeA ? DPLL_A : DPLL_B); udelay(150); REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL); REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK); REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC); REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL); REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK); REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC); REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE); REG_WRITE(pipeA ? 
DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE); REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS); REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC); REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF); psb_intel_wait_for_vblank(dev); REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR); REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE); psb_intel_wait_for_vblank(dev); paletteReg = pipeA ? PALETTE_A : PALETTE_B; for (i = 0; i < 256; ++i) REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]); } static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; uint32_t temp; size_t addr = 0; struct gtt_range *gt; struct drm_gem_object *obj; int ret; /* if we want to turn of the cursor ignore width and height */ if (!handle) { /* turn off the cursor */ temp = CURSOR_MODE_DISABLE; if (gma_power_begin(dev, false)) { REG_WRITE(control, temp); REG_WRITE(base, 0); gma_power_end(dev); } /* Unpin the old GEM object */ if (psb_intel_crtc->cursor_obj) { gt = container_of(psb_intel_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); psb_intel_crtc->cursor_obj = NULL; } return 0; } /* Currently we only support 64x64 cursors */ if (width != 64 || height != 64) { dev_dbg(dev->dev, "we currently only support 64x64 cursors\n"); return -EINVAL; } obj = drm_gem_object_lookup(dev, file_priv, handle); if (!obj) return -ENOENT; if (obj->size < width * height * 4) { dev_dbg(dev->dev, "buffer is to small\n"); return -ENOMEM; } gt = container_of(obj, struct gtt_range, gem); /* Pin the 
memory into the GTT */ ret = psb_gtt_pin(gt); if (ret) { dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); return ret; } addr = gt->offset; /* Or resource.start ??? */ psb_intel_crtc->cursor_addr = addr; temp = 0; /* set the pipe for the cursor */ temp |= (pipe << 28); temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; if (gma_power_begin(dev, false)) { REG_WRITE(control, temp); REG_WRITE(base, addr); gma_power_end(dev); } /* unpin the old bo */ if (psb_intel_crtc->cursor_obj) { gt = container_of(psb_intel_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); psb_intel_crtc->cursor_obj = obj; } return 0; } static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct drm_device *dev = crtc->dev; struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; uint32_t temp = 0; uint32_t addr; if (x < 0) { temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); x = -x; } if (y < 0) { temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); y = -y; } temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); addr = psb_intel_crtc->cursor_addr; if (gma_power_begin(dev, false)) { REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); REG_WRITE((pipe == 0) ? 
CURABASE : CURBBASE, addr); gma_power_end(dev); } return 0; } void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t type, uint32_t size) { struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int i; if (size != 256) return; for (i = 0; i < 256; i++) { psb_intel_crtc->lut_r[i] = red[i] >> 8; psb_intel_crtc->lut_g[i] = green[i] >> 8; psb_intel_crtc->lut_b[i] = blue[i] >> 8; } psb_intel_crtc_load_lut(crtc); } static int psb_crtc_set_config(struct drm_mode_set *set) { int ret; struct drm_device *dev = set->crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; if (!dev_priv->rpm_enabled) return drm_crtc_helper_set_config(set); pm_runtime_forbid(&dev->pdev->dev); ret = drm_crtc_helper_set_config(set); pm_runtime_allow(&dev->pdev->dev); return ret; } /* Returns the clock of the currently programmed mode of the given pipe. */ static int psb_intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) { struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; u32 dpll; u32 fp; struct psb_intel_clock_t clock; bool is_lvds; struct drm_psb_private *dev_priv = dev->dev_private; if (gma_power_begin(dev, false)) { dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B); if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) fp = REG_READ((pipe == 0) ? FPA0 : FPB0); else fp = REG_READ((pipe == 0) ? FPA1 : FPB1); is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN); gma_power_end(dev); } else { dpll = (pipe == 0) ? dev_priv->saveDPLL_A : dev_priv->saveDPLL_B; if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) fp = (pipe == 0) ? dev_priv->saveFPA0 : dev_priv->saveFPB0; else fp = (pipe == 0) ? 
dev_priv->saveFPA1 : dev_priv->saveFPB1; is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN); } clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; if (is_lvds) { clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> DPLL_FPA01_P1_POST_DIV_SHIFT); clock.p2 = 14; if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) { /* XXX: might not be 66MHz */ i8xx_clock(66000, &clock); } else i8xx_clock(48000, &clock); } else { if (dpll & PLL_P1_DIVIDE_BY_TWO) clock.p1 = 2; else { clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; } if (dpll & PLL_P2_DIVIDE_BY_4) clock.p2 = 4; else clock.p2 = 2; i8xx_clock(48000, &clock); } /* XXX: It would be nice to validate the clocks, but we can't reuse * i830PllIsValid() because it relies on the xf86_config connector * configuration being accurate, which it isn't necessarily. */ return clock.dot; } /** Returns the currently programmed mode of the given pipe. */ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc) { struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); int pipe = psb_intel_crtc->pipe; struct drm_display_mode *mode; int htot; int hsync; int vtot; int vsync; struct drm_psb_private *dev_priv = dev->dev_private; if (gma_power_begin(dev, false)) { htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B); hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B); vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B); vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B); gma_power_end(dev); } else { htot = (pipe == 0) ? dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B; hsync = (pipe == 0) ? dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B; vtot = (pipe == 0) ? dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B; vsync = (pipe == 0) ? 
dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B; } mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) return NULL; mode->clock = psb_intel_crtc_clock_get(dev, crtc); mode->hdisplay = (htot & 0xffff) + 1; mode->htotal = ((htot & 0xffff0000) >> 16) + 1; mode->hsync_start = (hsync & 0xffff) + 1; mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; mode->vdisplay = (vtot & 0xffff) + 1; mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; mode->vsync_start = (vsync & 0xffff) + 1; mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; drm_mode_set_name(mode); drm_mode_set_crtcinfo(mode, 0); return mode; } void psb_intel_crtc_destroy(struct drm_crtc *crtc) { struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc); struct gtt_range *gt; /* Unpin the old GEM object */ if (psb_intel_crtc->cursor_obj) { gt = container_of(psb_intel_crtc->cursor_obj, struct gtt_range, gem); psb_gtt_unpin(gt); drm_gem_object_unreference(psb_intel_crtc->cursor_obj); psb_intel_crtc->cursor_obj = NULL; } kfree(psb_intel_crtc->crtc_state); drm_crtc_cleanup(crtc); kfree(psb_intel_crtc); } const struct drm_crtc_helper_funcs psb_intel_helper_funcs = { .dpms = psb_intel_crtc_dpms, .mode_fixup = psb_intel_crtc_mode_fixup, .mode_set = psb_intel_crtc_mode_set, .mode_set_base = psb_intel_pipe_set_base, .prepare = psb_intel_crtc_prepare, .commit = psb_intel_crtc_commit, }; const struct drm_crtc_funcs psb_intel_crtc_funcs = { .save = psb_intel_crtc_save, .restore = psb_intel_crtc_restore, .cursor_set = psb_intel_crtc_cursor_set, .cursor_move = psb_intel_crtc_cursor_move, .gamma_set = psb_intel_crtc_gamma_set, .set_config = psb_crtc_set_config, .destroy = psb_intel_crtc_destroy, }; /* * Set the default value of cursor control and base register * to zero. 
This is a workaround for h/w defect on Oaktrail */
static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
{
	u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
	u32 base[3] = { CURABASE, CURBBASE, CURCBASE };

	/* Zero both the control and base registers so the pipe starts
	 * with the cursor disabled and no stale base address. */
	REG_WRITE(control[pipe], 0);
	REG_WRITE(base[pipe], 0);
}

/*
 * Allocate, initialize and register one CRTC (plus the trailing
 * connector-pointer array used by fbdev) for the given pipe.
 * On allocation failure everything already allocated is freed and
 * the function returns without registering a CRTC.
 */
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
		     struct psb_intel_mode_device *mode_dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_crtc *psb_intel_crtc;
	int i;
	uint16_t *r_base, *g_base, *b_base;

	/* We allocate a extra array of drm_connector pointers
	 * for fbdev after the crtc */
	psb_intel_crtc =
	    kzalloc(sizeof(struct psb_intel_crtc) +
		    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
		    GFP_KERNEL);
	if (psb_intel_crtc == NULL)
		return;

	psb_intel_crtc->crtc_state =
		kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
	if (!psb_intel_crtc->crtc_state) {
		dev_err(dev->dev, "Crtc state error: No memory\n");
		kfree(psb_intel_crtc);
		return;
	}

	/* Set the CRTC operations from the chip specific data */
	drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);

	drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
	psb_intel_crtc->pipe = pipe;
	/* Plane is wired 1:1 to the pipe on this hardware. */
	psb_intel_crtc->plane = pipe;

	/* gamma_store holds the R, G and B tables back to back,
	 * 256 entries each. */
	r_base = psb_intel_crtc->base.gamma_store;
	g_base = r_base + 256;
	b_base = g_base + 256;

	/* Start from an identity LUT with zero per-entry adjustment. */
	for (i = 0; i < 256; i++) {
		psb_intel_crtc->lut_r[i] = i;
		psb_intel_crtc->lut_g[i] = i;
		psb_intel_crtc->lut_b[i] = i;
		r_base[i] = i << 8;
		g_base[i] = i << 8;
		b_base[i] = i << 8;
		psb_intel_crtc->lut_adj[i] = 0;
	}

	psb_intel_crtc->mode_dev = mode_dev;
	psb_intel_crtc->cursor_addr = 0;

	drm_crtc_helper_add(&psb_intel_crtc->base,
			    dev_priv->ops->crtc_helper);

	/* Setup the array of drm_connector pointer array */
	psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
	/* Each plane/pipe may be claimed by exactly one CRTC. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
							&psb_intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
							&psb_intel_crtc->base;
	/* Connector pointer storage sits directly after the crtc struct
	 * (over-allocated above). */
	psb_intel_crtc->mode_set.connectors =
	    (struct drm_connector **) (psb_intel_crtc + 1);
	psb_intel_crtc->mode_set.num_connectors = 0;
	psb_intel_cursor_init(dev, pipe);
}

/* IOCTL helper: translate a DRM CRTC object id into its pipe number. */
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
	struct drm_mode_object *drmmode_obj;
	struct psb_intel_crtc *crtc;

	if (!dev_priv) {
		dev_err(dev->dev, "called with no initialization\n");
		return -EINVAL;
	}

	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
			DRM_MODE_OBJECT_CRTC);

	if (!drmmode_obj) {
		dev_err(dev->dev, "no such CRTC id\n");
		return -EINVAL;
	}

	crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/* Reverse lookup: find the registered CRTC driving the given pipe.
 * NOTE(review): if no CRTC matches, list_for_each_entry leaves crtc
 * pointing at the container of the list head rather than NULL --
 * callers appear to assume a match always exists; confirm before
 * adding new call sites. */
struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{
	struct drm_crtc *crtc = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
		if (psb_intel_crtc->pipe == pipe)
			break;
	}
	return crtc;
}

/* Build a bitmask of connector list indices whose output type matches
 * any bit set in type_mask (used for encoder possible_clones). */
int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
{
	int index_mask = 0;
	struct drm_connector *connector;
	int entry = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct psb_intel_output *psb_intel_output =
		    to_psb_intel_output(connector);
		if (type_mask & (1 << psb_intel_output->type))
			index_mask |= (1 << entry);
		entry++;
	}
	return index_mask;
}

/* Tear down all mode-setting objects registered on this device. */
void psb_intel_modeset_cleanup(struct drm_device *dev)
{
	drm_mode_config_cleanup(dev);
}

/* current intel driver doesn't take advantage of encoders
   always give back the encoder for the connector */
struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
{
	struct psb_intel_output *psb_intel_output =
					to_psb_intel_output(connector);

	return &psb_intel_output->enc;
}
gpl-2.0
muftiarfan/DWI_xm
drivers/staging/prima/CORE/MAC/src/pe/lim/limProcessBeaconFrame.c
387
12307
/* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * * Airgo Networks, Inc proprietary. All rights reserved. * This file limProcessBeaconFrame.cc contains the code * for processing Received Beacon Frame. * Author: Chandra Modumudi * Date: 03/01/02 * History:- * Date Modified by Modification Information * -------------------------------------------------------------------- * */ #if (WNI_POLARIS_FW_PRODUCT == AP) #include "wniCfgAp.h" #else #include "wniCfgSta.h" #endif #include "aniGlobal.h" #include "cfgApi.h" #include "schApi.h" #include "wniCfgAp.h" #ifdef FEATURE_WLAN_NON_INTEGRATED_SOC #include "halCommonApi.h" #endif #include "utilsApi.h" #include "limTypes.h" #include "limUtils.h" #include "limAssocUtils.h" #include "limPropExtsUtils.h" #include "limSerDesUtils.h" /** * limProcessBeaconFrame * *FUNCTION: * This function is called by limProcessMessageQueue() upon Beacon * frame reception. * *LOGIC: * *ASSUMPTIONS: * *NOTE: * 1. Beacons received in 'normal' state in IBSS are handled by * Beacon Processing module. 
 *
 * @param pMac - Pointer to Global MAC structure
 * @param *pRxPacketInfo - A pointer to RX packet info structure
 * @return None
 */
void
limProcessBeaconFrame(tpAniSirGlobal pMac, tANI_U8 *pRxPacketInfo, tpPESession psessionEntry)
{
    tpSirMacMgmtHdr      pHdr;
    tSchBeaconStruct    *pBeacon;

    /* Global beacon counter; here is it required to increment session
     * specific heartBeat beacon counter */
    pMac->lim.gLimNumBeaconsRcvd++;

    pHdr = WDA_GET_RX_MAC_HEADER(pRxPacketInfo);

    PELOG2(limLog(pMac, LOG2, FL("Received Beacon frame with length=%d from "),
           WDA_GET_RX_MPDU_LEN(pRxPacketInfo));
    limPrintMacAddr(pMac, pHdr->sa, LOG2);)

    /* A beacon during scan terminates the min-channel dwell timer;
     * bail out if that fails. */
    if (limDeactivateMinChannelTimerDuringScan(pMac) != eSIR_SUCCESS)
        return;

    /**
     * Expect Beacon only when
     * 1. STA is in Scan mode waiting for Beacon/Probe response or
     * 2. STA is waiting for Beacon/Probe Response Frame
     *    to announce join success.
     * 3. STA/AP is in Learn mode
     */
    if ((pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) ||
        (pMac->lim.gLimMlmState == eLIM_MLM_PASSIVE_SCAN_STATE) ||
        (pMac->lim.gLimMlmState == eLIM_MLM_LEARN_STATE) ||
        (psessionEntry->limMlmState == eLIM_MLM_WT_JOIN_BEACON_STATE))
    {
        /* tSchBeaconStruct is heap-allocated here (freed at the end of
         * this branch on every path). */
        if(eHAL_STATUS_SUCCESS !=
            palAllocateMemory(pMac->hHdd, (void **)&pBeacon, sizeof(tSchBeaconStruct)))
        {
            limLog(pMac, LOGE, FL("Unable to PAL allocate memory in limProcessBeaconFrame\n") );
            return;
        }

        // Parse received Beacon
        if (sirConvertBeaconFrame2Struct(pMac, (tANI_U8 *) pRxPacketInfo,
                                         pBeacon) != eSIR_SUCCESS)
        {
            // Received wrongly formatted/invalid Beacon.
            // Ignore it and move on.
            limLog(pMac, LOGW,
                   FL("Received invalid Beacon in state %X\n"),
                   psessionEntry->limMlmState);
            limPrintMlmState(pMac, LOGW,  psessionEntry->limMlmState);
            palFreeMemory(pMac->hHdd, pBeacon);
            return;
        }

        /* Trace the beacon TSF (two 32-bit words). */
        MTRACE(macTrace(pMac, TRACE_CODE_RX_MGMT_TSF, 0, pBeacon->timeStamp[0]);)
        MTRACE(macTrace(pMac, TRACE_CODE_RX_MGMT_TSF, 0, pBeacon->timeStamp[1]);)

        if ((pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) ||
            (pMac->lim.gLimMlmState == eLIM_MLM_PASSIVE_SCAN_STATE))
        {
#ifdef WLAN_FEATURE_P2P
            //If we are scanning for P2P, only accept probe rsp
            if((pMac->lim.gLimHalScanState != eLIM_HAL_SCANNING_STATE) ||
               (NULL == pMac->lim.gpLimMlmScanReq) ||
               !pMac->lim.gpLimMlmScanReq->p2pSearch )
#endif
            {
                limCheckAndAddBssDescription(pMac, pBeacon, pRxPacketInfo,
                   ((pMac->lim.gLimHalScanState == eLIM_HAL_SCANNING_STATE) ? eANI_BOOLEAN_TRUE : eANI_BOOLEAN_FALSE),
                   eANI_BOOLEAN_FALSE);
            }
        }
        else if (pMac->lim.gLimMlmState == eLIM_MLM_LEARN_STATE)
        {
#if (WNI_POLARIS_FW_PRODUCT == AP) && (WNI_POLARIS_FW_PACKAGE == ADVANCED)
            // STA/AP is in learn mode
            /* Not sure whether the below 2 lines are needed for the station. TODO If yes, this should be
             * uncommented. Also when we tested enabling this, there is a crash as soon as the station
             * comes up which needs to be fixed*/
            //if (pMac->lim.gLimSystemRole == eLIM_STA_ROLE)
            //    limCheckAndAddBssDescription(pMac, pBeacon, pRxPacketInfo, eANI_BOOLEAN_TRUE);
            limCollectMeasurementData(pMac, pRxPacketInfo, pBeacon);
            PELOG3(limLog(pMac, LOG3,
               FL("Parsed WDS info in Beacon frames: wdsLength=%d\n"),
               pBeacon->propIEinfo.wdsLength);)
#endif
        }
        else
        {
            /* eLIM_MLM_WT_JOIN_BEACON_STATE: cache the raw beacon for the
             * join-confirm path, then announce join success. */
            if( psessionEntry->beacon != NULL )
            {
                palFreeMemory(pMac->hHdd, psessionEntry->beacon);
                psessionEntry->beacon = NULL;
            }
            psessionEntry->bcnLen = WDA_GET_RX_PAYLOAD_LEN(pRxPacketInfo);
            if( (palAllocateMemory(pMac->hHdd, (void**)&psessionEntry->beacon,
                 psessionEntry->bcnLen)) != eHAL_STATUS_SUCCESS)
            {
                PELOGE(limLog(pMac, LOGE, FL("Unable to allocate memory to store beacon"));)
            }
            else
            {
                //Store the Beacon/ProbeRsp. This is sent to csr/hdd in join cnf response.
                palCopyMemory(pMac->hHdd, psessionEntry->beacon,
                              WDA_GET_RX_MPDU_DATA(pRxPacketInfo),
                              psessionEntry->bcnLen);
            }

            // STA in WT_JOIN_BEACON_STATE (IBSS)
            limCheckAndAnnounceJoinSuccess(pMac, pBeacon, pHdr, psessionEntry);
        } // if (pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE)
        palFreeMemory(pMac->hHdd, pBeacon);
    } // if ((pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) || ...
    else
    {
        // Ignore Beacon frame in all other states
        if (psessionEntry->limMlmState == eLIM_MLM_JOINED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_BSS_STARTED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_AUTH_FRAME2_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_AUTH_FRAME3_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_AUTH_FRAME4_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_AUTH_RSP_TIMEOUT_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_AUTHENTICATED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_ASSOC_RSP_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_REASSOC_RSP_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_ASSOCIATED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_REASSOCIATED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_ASSOC_CNF_STATE ||
            limIsReassocInProgress(pMac,psessionEntry))
        {
            // nothing unexpected about beacon in these states
            pMac->lim.gLimNumBeaconsIgnored++;
        }
        else
        {
            PELOG1(limLog(pMac, LOG1, FL("Received Beacon in unexpected state %d\n"),
                   psessionEntry->limMlmState);
            limPrintMlmState(pMac, LOG1, psessionEntry->limMlmState);)
#ifdef WLAN_DEBUG
            pMac->lim.gLimUnexpBcnCnt++;
#endif
        }
    }

    return;
} /*** end limProcessBeaconFrame() ***/

/**---------------------------------------------------------------
\fn     limProcessBeaconFrameNoSession
\brief  This function is called by limProcessMessageQueue()
\       upon Beacon reception.
\
\param  pMac
\param  *pRxPacketInfo - A pointer to Rx packet info structure
\return None
------------------------------------------------------------------*/
void limProcessBeaconFrameNoSession(tpAniSirGlobal pMac, tANI_U8 *pRxPacketInfo)
{
    tpSirMacMgmtHdr      pHdr;
    tSchBeaconStruct    *pBeacon;

    pMac->lim.gLimNumBeaconsRcvd++;
    pHdr = WDA_GET_RX_MAC_HEADER(pRxPacketInfo);

    limLog(pMac, LOG2, FL("Received Beacon frame with length=%d from "),
           WDA_GET_RX_MPDU_LEN(pRxPacketInfo));
    limPrintMacAddr(pMac, pHdr->sa, LOG2);

    /* A beacon during scan terminates the min-channel dwell timer;
     * bail out if that fails. */
    if (limDeactivateMinChannelTimerDuringScan(pMac) != eSIR_SUCCESS)
        return;

    /**
     * No session has been established. Expect Beacon only when
     * 1. STA is in Scan mode waiting for Beacon/Probe response or
     * 2. STA/AP is in Learn mode
     */
    if ((pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) ||
        (pMac->lim.gLimMlmState == eLIM_MLM_PASSIVE_SCAN_STATE) ||
        (pMac->lim.gLimMlmState == eLIM_MLM_LEARN_STATE))
    {
        /* tSchBeaconStruct is heap-allocated (freed below on every path). */
        if(eHAL_STATUS_SUCCESS !=
            palAllocateMemory(pMac->hHdd, (void **)&pBeacon, sizeof(tSchBeaconStruct)))
        {
            limLog(pMac, LOGE, FL("Unable to PAL allocate memory in limProcessBeaconFrameNoSession\n") );
            return;
        }

        if (sirConvertBeaconFrame2Struct(pMac, (tANI_U8 *) pRxPacketInfo,
                                         pBeacon) != eSIR_SUCCESS)
        {
            // Received wrongly formatted/invalid Beacon. Ignore and move on.
            limLog(pMac, LOGW,
                   FL("Received invalid Beacon in global MLM state %X\n"),
                   pMac->lim.gLimMlmState);
            limPrintMlmState(pMac, LOGW, pMac->lim.gLimMlmState);
            palFreeMemory(pMac->hHdd, pBeacon);
            return;
        }

        if ( (pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) ||
             (pMac->lim.gLimMlmState == eLIM_MLM_PASSIVE_SCAN_STATE) )
        {
#ifdef WLAN_FEATURE_P2P
            //If we are scanning for P2P, only accept probe rsp
            if((pMac->lim.gLimHalScanState != eLIM_HAL_SCANNING_STATE) ||
               (NULL == pMac->lim.gpLimMlmScanReq) ||
               !pMac->lim.gpLimMlmScanReq->p2pSearch )
#endif
            {
                limCheckAndAddBssDescription(pMac, pBeacon, pRxPacketInfo,
                                             eANI_BOOLEAN_TRUE, eANI_BOOLEAN_FALSE);
            }
        }
        else if (pMac->lim.gLimMlmState == eLIM_MLM_LEARN_STATE)
        {
#if (WNI_POLARIS_FW_PRODUCT == AP) && (WNI_POLARIS_FW_PACKAGE == ADVANCED)
            // STA/AP is in learn mode
            /* Not sure whether the below 2 lines are needed for the station. TODO If yes, this should be
             * uncommented. Also when we tested enabling this, there is a crash as soon as the station
             * comes up which needs to be fixed*/
            //if (pMac->lim.gLimSystemRole == eLIM_STA_ROLE)
            //    limCheckAndAddBssDescription(pMac, pBeacon, pRxPacketInfo, eANI_BOOLEAN_TRUE);
            limCollectMeasurementData(pMac, pRxPacketInfo, pBeacon);
            limLog(pMac, LOG3,
                   FL("Parsed WDS info in Beacon frames: wdsLength=%d\n"),
                   pBeacon->propIEinfo.wdsLength);
#endif
        } // end of eLIM_MLM_LEARN_STATE)
        palFreeMemory(pMac->hHdd, pBeacon);
    } // end of (eLIM_MLM_WT_PROBE_RESP_STATE) || (eLIM_MLM_PASSIVE_SCAN_STATE)
    else
    {
        limLog(pMac, LOG1, FL("Rcvd Beacon in unexpected MLM state %d\n"),
               pMac->lim.gLimMlmState);
        limPrintMlmState(pMac, LOG1, pMac->lim.gLimMlmState);
#ifdef WLAN_DEBUG
        pMac->lim.gLimUnexpBcnCnt++;
#endif
    }

    return;
} /*** end limProcessBeaconFrameNoSession() ***/
gpl-2.0
SOKP/kernel_oneplus_msm8974
arch/arm/mach-msm/ocmem_sched.c
643
56177
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/rbtree.h> #include <linux/idr.h> #include <linux/genalloc.h> #include <linux/of.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <mach/ocmem_priv.h> enum request_states { R_FREE = 0x0, /* request is not allocated */ R_PENDING, /* request has a pending operation */ R_ALLOCATED, /* request has been allocated */ R_ENQUEUED, /* request has been enqueued for future retry */ R_MUST_GROW, /* request must grow as a part of pending operation */ R_MUST_SHRINK, /* request must shrink */ R_WF_SHRINK, /* shrink must be ack'ed by a client */ R_SHRUNK, /* request was shrunk */ R_MUST_MAP, /* request must be mapped before being used */ R_MUST_UNMAP, /* request must be unmapped when not being used */ R_MAPPED, /* request is mapped and actively used by client */ R_UNMAPPED, /* request is not mapped, so it's not in active use */ R_EVICTED, /* request is evicted and must be restored */ }; #define SET_STATE(x, val) (set_bit((val), &(x)->state)) #define CLEAR_STATE(x, val) (clear_bit((val), &(x)->state)) #define TEST_STATE(x, val) (test_bit((val), &(x)->state)) enum op_res { OP_COMPLETE = 0x0, OP_RESCHED, OP_PARTIAL, OP_EVICT, OP_FAIL = ~0x0, }; /* Represents various client priorities */ /* Note: More than one client can share a priority level */ enum client_prio { MIN_PRIO = 0x0, NO_PRIO 
= MIN_PRIO, PRIO_SENSORS = 0x1, PRIO_OTHER_OS = 0x1, PRIO_LP_AUDIO = 0x1, PRIO_HP_AUDIO = 0x2, PRIO_VOICE = 0x3, PRIO_GFX_GROWTH = 0x4, PRIO_VIDEO = 0x5, PRIO_GFX = 0x6, PRIO_OCMEM = 0x7, MAX_OCMEM_PRIO = PRIO_OCMEM + 1, }; static void __iomem *ocmem_vaddr; static struct list_head sched_queue[MAX_OCMEM_PRIO]; static struct mutex sched_queue_mutex; /* The duration in msecs before a pending operation is scheduled * This allows an idle window between use case boundaries where various * hardware state changes can occur. The value will be tweaked on actual * hardware. */ /* Delay in ms for switching to low power mode for OCMEM */ #define SCHED_DELAY 5000 static struct list_head rdm_queue; static struct mutex rdm_mutex; static struct workqueue_struct *ocmem_rdm_wq; static struct workqueue_struct *ocmem_eviction_wq; static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX]; struct ocmem_rdm_work { int id; struct ocmem_map_list *list; struct ocmem_handle *handle; int direction; struct work_struct work; }; /* OCMEM Operational modes */ enum ocmem_client_modes { OCMEM_PERFORMANCE = 1, OCMEM_PASSIVE, OCMEM_LOW_POWER, OCMEM_MODE_MAX = OCMEM_LOW_POWER }; /* OCMEM Addressing modes */ enum ocmem_interconnects { OCMEM_BLOCKED = 0, OCMEM_PORT = 1, OCMEM_OCMEMNOC = 2, OCMEM_SYSNOC = 3, }; enum ocmem_tz_client { TZ_UNUSED = 0x0, TZ_GRAPHICS, TZ_VIDEO, TZ_LP_AUDIO, TZ_SENSORS, TZ_OTHER_OS, TZ_DEBUG, }; /** * Primary OCMEM Arbitration Table **/ struct ocmem_table { int client_id; int priority; int mode; int hw_interconnect; int tz_id; } ocmem_client_table[OCMEM_CLIENT_MAX] = { {OCMEM_GRAPHICS, PRIO_GFX, OCMEM_PERFORMANCE, OCMEM_PORT, TZ_GRAPHICS}, {OCMEM_VIDEO, PRIO_VIDEO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC, TZ_VIDEO}, {OCMEM_CAMERA, NO_PRIO, OCMEM_PERFORMANCE, OCMEM_OCMEMNOC, TZ_UNUSED}, {OCMEM_HP_AUDIO, PRIO_HP_AUDIO, OCMEM_PASSIVE, OCMEM_BLOCKED, TZ_UNUSED}, {OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED, TZ_UNUSED}, {OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, 
OCMEM_SYSNOC, TZ_LP_AUDIO}, {OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC, TZ_SENSORS}, {OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC, TZ_OTHER_OS}, }; static struct rb_root sched_tree; static struct mutex sched_mutex; static struct mutex allocation_mutex; static struct mutex free_mutex; /* A region represents a continuous interval in OCMEM address space */ struct ocmem_region { /* Chain in Interval Tree */ struct rb_node region_rb; /* Hash map of requests */ struct idr region_idr; /* Chain in eviction list */ struct list_head eviction_list; unsigned long r_start; unsigned long r_end; unsigned long r_sz; /* Highest priority of all requests served by this region */ int max_prio; }; /* Is OCMEM tightly coupled to the client ?*/ static inline int is_tcm(int id) { if (ocmem_client_table[id].hw_interconnect == OCMEM_PORT || ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC) return 1; else return 0; } static inline int is_iface_access(int id) { return ocmem_client_table[id].hw_interconnect == OCMEM_OCMEMNOC ? 1 : 0; } static inline int is_remapped_access(int id) { return ocmem_client_table[id].hw_interconnect == OCMEM_SYSNOC ? 1 : 0; } static inline int is_blocked(int id) { return ocmem_client_table[id].hw_interconnect == OCMEM_BLOCKED ? 
1 : 0; } inline struct ocmem_buf *handle_to_buffer(struct ocmem_handle *handle) { if (handle) return &handle->buffer; else return NULL; } inline struct ocmem_handle *buffer_to_handle(struct ocmem_buf *buffer) { if (buffer) return container_of(buffer, struct ocmem_handle, buffer); else return NULL; } inline struct ocmem_req *handle_to_req(struct ocmem_handle *handle) { if (handle) return handle->req; else return NULL; } inline struct ocmem_handle *req_to_handle(struct ocmem_req *req) { if (req && req->buffer) return container_of(req->buffer, struct ocmem_handle, buffer); else return NULL; } /* Simple wrappers which will have debug features added later */ inline int ocmem_read(void *at) { return readl_relaxed(at); } inline int ocmem_write(unsigned long val, void *at) { writel_relaxed(val, at); return 0; } inline int get_mode(int id) { if (!check_id(id)) return MODE_NOT_SET; else return ocmem_client_table[id].mode == OCMEM_PERFORMANCE ? WIDE_MODE : THIN_MODE; } inline int get_tz_id(int id) { if (!check_id(id)) return TZ_UNUSED; else return ocmem_client_table[id].tz_id; } /* Returns the address that can be used by a device core to access OCMEM */ static unsigned long device_address(int id, unsigned long addr) { int hw_interconnect = ocmem_client_table[id].hw_interconnect; unsigned long ret_addr = 0x0; switch (hw_interconnect) { case OCMEM_PORT: case OCMEM_OCMEMNOC: ret_addr = phys_to_offset(addr); break; case OCMEM_SYSNOC: ret_addr = addr; break; case OCMEM_BLOCKED: ret_addr = 0x0; break; } return ret_addr; } /* Returns the address as viewed by the core */ static unsigned long core_address(int id, unsigned long addr) { int hw_interconnect = ocmem_client_table[id].hw_interconnect; unsigned long ret_addr = 0x0; switch (hw_interconnect) { case OCMEM_PORT: case OCMEM_OCMEMNOC: ret_addr = offset_to_phys(addr); break; case OCMEM_SYSNOC: ret_addr = addr; break; case OCMEM_BLOCKED: ret_addr = 0x0; break; } return ret_addr; } static inline struct ocmem_zone *zone_of(struct 
ocmem_req *req) { int owner; if (!req) return NULL; owner = req->owner; return get_zone(owner); } static int insert_region(struct ocmem_region *region) { struct rb_root *root = &sched_tree; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct ocmem_region *tmp = NULL; unsigned long addr = region->r_start; while (*p) { parent = *p; tmp = rb_entry(parent, struct ocmem_region, region_rb); if (tmp->r_end > addr) { if (tmp->r_start <= addr) break; p = &(*p)->rb_left; } else if (tmp->r_end <= addr) p = &(*p)->rb_right; } rb_link_node(&region->region_rb, parent, p); rb_insert_color(&region->region_rb, root); return 0; } static int remove_region(struct ocmem_region *region) { struct rb_root *root = &sched_tree; rb_erase(&region->region_rb, root); return 0; } static struct ocmem_req *ocmem_create_req(void) { struct ocmem_req *p = NULL; p = kzalloc(sizeof(struct ocmem_req), GFP_KERNEL); if (!p) return NULL; INIT_LIST_HEAD(&p->zone_list); INIT_LIST_HEAD(&p->sched_list); init_rwsem(&p->rw_sem); SET_STATE(p, R_FREE); pr_debug("request %p created\n", p); return p; } static int ocmem_destroy_req(struct ocmem_req *req) { kfree(req); return 0; } static struct ocmem_region *create_region(void) { struct ocmem_region *p = NULL; p = kzalloc(sizeof(struct ocmem_region), GFP_KERNEL); if (!p) return NULL; idr_init(&p->region_idr); INIT_LIST_HEAD(&p->eviction_list); p->r_start = p->r_end = p->r_sz = 0x0; p->max_prio = NO_PRIO; return p; } static int destroy_region(struct ocmem_region *region) { idr_destroy(&region->region_idr); kfree(region); return 0; } static int attach_req(struct ocmem_region *region, struct ocmem_req *req) { int ret, id; while (1) { if (idr_pre_get(&region->region_idr, GFP_KERNEL) == 0) return -ENOMEM; ret = idr_get_new_above(&region->region_idr, req, 1, &id); if (ret != -EAGAIN) break; } if (!ret) { req->req_id = id; pr_debug("ocmem: request %p(id:%d) attached to region %p\n", req, id, region); return 0; } return -EINVAL; } static int 
detach_req(struct ocmem_region *region, struct ocmem_req *req) { idr_remove(&region->region_idr, req->req_id); return 0; } static int populate_region(struct ocmem_region *region, struct ocmem_req *req) { region->r_start = req->req_start; region->r_end = req->req_end; region->r_sz = req->req_end - req->req_start + 1; return 0; } static int region_req_count(int id, void *ptr, void *data) { int *count = data; *count = *count + 1; return 0; } static int req_count(struct ocmem_region *region) { int count = 0; idr_for_each(&region->region_idr, region_req_count, &count); return count; } static int compute_max_prio(int id, void *ptr, void *data) { int *max = data; struct ocmem_req *req = ptr; if (req->prio > *max) *max = req->prio; return 0; } static int update_region_prio(struct ocmem_region *region) { int max_prio; if (req_count(region) != 0) { idr_for_each(&region->region_idr, compute_max_prio, &max_prio); region->max_prio = max_prio; } else { region->max_prio = NO_PRIO; } pr_debug("ocmem: Updating prio of region %p as %d\n", region, max_prio); return 0; } static struct ocmem_region *find_region(unsigned long addr) { struct ocmem_region *region = NULL; struct rb_node *rb_node = NULL; rb_node = sched_tree.rb_node; while (rb_node) { struct ocmem_region *tmp_region = NULL; tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb); if (tmp_region->r_end > addr) { region = tmp_region; if (tmp_region->r_start <= addr) break; rb_node = rb_node->rb_left; } else { rb_node = rb_node->rb_right; } } return region; } static struct ocmem_region *find_region_intersection(unsigned long start, unsigned long end) { struct ocmem_region *region = NULL; region = find_region(start); if (region && end <= region->r_start) region = NULL; return region; } static struct ocmem_region *find_region_match(unsigned long start, unsigned long end) { struct ocmem_region *region = NULL; region = find_region(start); if (region && start == region->r_start && end == region->r_end) return region; return 
NULL; } static struct ocmem_req *find_req_match(int owner, struct ocmem_region *region) { struct ocmem_req *req = NULL; if (!region) return NULL; req = idr_find(&region->region_idr, owner); return req; } /* Must be called with req->sem held */ static inline int is_mapped(struct ocmem_req *req) { return TEST_STATE(req, R_MAPPED); } static inline int is_pending_shrink(struct ocmem_req *req) { return TEST_STATE(req, R_MUST_SHRINK) || TEST_STATE(req, R_WF_SHRINK); } /* Must be called with sched_mutex held */ static int __sched_unmap(struct ocmem_req *req) { struct ocmem_req *matched_req = NULL; struct ocmem_region *matched_region = NULL; if (!TEST_STATE(req, R_MAPPED)) goto invalid_op_error; matched_region = find_region_match(req->req_start, req->req_end); matched_req = find_req_match(req->req_id, matched_region); if (!matched_region || !matched_req) { pr_err("Could not find backing region for req"); goto invalid_op_error; } if (matched_req != req) { pr_err("Request does not match backing req"); goto invalid_op_error; } if (!is_mapped(req)) { pr_err("Request is not currently mapped"); goto invalid_op_error; } /* Update the request state */ CLEAR_STATE(req, R_MAPPED); SET_STATE(req, R_MUST_MAP); return OP_COMPLETE; invalid_op_error: return OP_FAIL; } /* Must be called with sched_mutex held */ static int __sched_map(struct ocmem_req *req) { struct ocmem_req *matched_req = NULL; struct ocmem_region *matched_region = NULL; matched_region = find_region_match(req->req_start, req->req_end); matched_req = find_req_match(req->req_id, matched_region); if (!matched_region || !matched_req) { pr_err("Could not find backing region for req"); goto invalid_op_error; } if (matched_req != req) { pr_err("Request does not match backing req"); goto invalid_op_error; } /* Update the request state */ CLEAR_STATE(req, R_MUST_MAP); SET_STATE(req, R_MAPPED); return OP_COMPLETE; invalid_op_error: return OP_FAIL; } static int do_map(struct ocmem_req *req) { int rc = 0; down_write(&req->rw_sem); 
mutex_lock(&sched_mutex); rc = __sched_map(req); mutex_unlock(&sched_mutex); up_write(&req->rw_sem); if (rc == OP_FAIL) return -EINVAL; return 0; } static int do_unmap(struct ocmem_req *req) { int rc = 0; down_write(&req->rw_sem); mutex_lock(&sched_mutex); rc = __sched_unmap(req); mutex_unlock(&sched_mutex); up_write(&req->rw_sem); if (rc == OP_FAIL) return -EINVAL; return 0; } static int process_map(struct ocmem_req *req, unsigned long start, unsigned long end) { int rc = 0; rc = ocmem_restore_sec_program(OCMEM_SECURE_DEV_ID); if (rc < 0) { pr_err("ocmem: Failed to restore security programming\n"); goto lock_failed; } rc = ocmem_lock(req->owner, phys_to_offset(req->req_start), req->req_sz, get_mode(req->owner)); if (rc < 0) { pr_err("ocmem: Failed to secure request %p for %d\n", req, req->owner); goto lock_failed; } rc = do_map(req); if (rc < 0) { pr_err("ocmem: Failed to map request %p for %d\n", req, req->owner); goto process_map_fail; } pr_debug("ocmem: Mapped request %p\n", req); return 0; process_map_fail: ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz); lock_failed: pr_err("ocmem: Failed to map ocmem request\n"); return rc; } static int process_unmap(struct ocmem_req *req, unsigned long start, unsigned long end) { int rc = 0; rc = do_unmap(req); if (rc < 0) goto process_unmap_fail; rc = ocmem_unlock(req->owner, phys_to_offset(req->req_start), req->req_sz); if (rc < 0) { pr_err("ocmem: Failed to un-secure request %p for %d\n", req, req->owner); goto unlock_failed; } pr_debug("ocmem: Unmapped request %p\n", req); return 0; unlock_failed: process_unmap_fail: pr_err("ocmem: Failed to unmap ocmem request\n"); return rc; } static int __sched_grow(struct ocmem_req *req, bool can_block) { unsigned long min = req->req_min; unsigned long max = req->req_max; unsigned long step = req->req_step; int owner = req->owner; unsigned long curr_sz = 0; unsigned long growth_sz = 0; unsigned long curr_start = 0; enum client_prio prio = req->prio; unsigned 
long alloc_addr = 0x0; bool retry; struct ocmem_region *spanned_r = NULL; struct ocmem_region *overlap_r = NULL; int rc = 0; struct ocmem_req *matched_req = NULL; struct ocmem_region *matched_region = NULL; struct ocmem_zone *zone = get_zone(owner); struct ocmem_region *region = NULL; matched_region = find_region_match(req->req_start, req->req_end); matched_req = find_req_match(req->req_id, matched_region); if (!matched_region || !matched_req) { pr_err("Could not find backing region for req"); goto invalid_op_error; } if (matched_req != req) { pr_err("Request does not match backing req"); goto invalid_op_error; } curr_sz = matched_req->req_sz; curr_start = matched_req->req_start; growth_sz = matched_req->req_max - matched_req->req_sz; pr_debug("Attempting to grow req %p from %lx to %lx\n", req, matched_req->req_sz, matched_req->req_max); retry = false; pr_debug("ocmem: GROW: growth size %lx\n", growth_sz); retry_next_step: spanned_r = NULL; overlap_r = NULL; spanned_r = find_region(zone->z_head); overlap_r = find_region_intersection(zone->z_head, zone->z_head + growth_sz); if (overlap_r == NULL) { /* no conflicting regions, schedule this region */ zone->z_ops->free(zone, curr_start, curr_sz); rc = zone->z_ops->allocate(zone, curr_sz + growth_sz, &alloc_addr); if (rc) { pr_err("ocmem: zone allocation operation failed\n"); goto internal_error; } curr_sz += growth_sz; /* Detach the region from the interval tree */ /* This is to guarantee that any change in size * causes the tree to be rebalanced if required */ detach_req(matched_region, req); if (req_count(matched_region) == 0) { remove_region(matched_region); region = matched_region; } else { region = create_region(); if (!region) { pr_err("ocmem: Unable to create region\n"); goto region_error; } } /* update the request */ req->req_start = alloc_addr; /* increment the size to reflect new length */ req->req_sz = curr_sz; req->req_end = alloc_addr + req->req_sz - 1; /* update request state */ CLEAR_STATE(req, 
R_MUST_GROW); SET_STATE(req, R_ALLOCATED); SET_STATE(req, R_MUST_MAP); req->op = SCHED_MAP; /* update the region with new req */ attach_req(region, req); populate_region(region, req); update_region_prio(region); /* update the tree with new region */ if (insert_region(region)) { pr_err("ocmem: Failed to insert the region\n"); goto region_error; } if (retry) { SET_STATE(req, R_MUST_GROW); SET_STATE(req, R_PENDING); req->op = SCHED_GROW; return OP_PARTIAL; } } else if (spanned_r != NULL && overlap_r != NULL) { /* resolve conflicting regions based on priority */ if (overlap_r->max_prio < prio) { /* Growth cannot be triggered unless a previous * client of lower priority was evicted */ pr_err("ocmem: Invalid growth scheduled\n"); /* This is serious enough to fail */ BUG(); return OP_FAIL; } else if (overlap_r->max_prio > prio) { if (min == max) { /* Cannot grow at this time, try later */ SET_STATE(req, R_PENDING); SET_STATE(req, R_MUST_GROW); return OP_RESCHED; } else { /* Try to grow in steps */ growth_sz -= step; /* We are OOM at this point so need to retry */ if (growth_sz <= curr_sz) { SET_STATE(req, R_PENDING); SET_STATE(req, R_MUST_GROW); return OP_RESCHED; } retry = true; pr_debug("ocmem: Attempting with reduced size %lx\n", growth_sz); goto retry_next_step; } } else { pr_err("ocmem: grow: New Region %p Existing %p\n", matched_region, overlap_r); pr_err("ocmem: Undetermined behavior\n"); /* This is serious enough to fail */ BUG(); } } else if (spanned_r == NULL && overlap_r != NULL) { goto err_not_supported; } return OP_COMPLETE; err_not_supported: pr_err("ocmem: Scheduled unsupported operation\n"); return OP_FAIL; region_error: zone->z_ops->free(zone, alloc_addr, curr_sz); detach_req(region, req); update_region_prio(region); /* req is going to be destroyed by the caller anyways */ internal_error: destroy_region(region); invalid_op_error: return OP_FAIL; } /* Must be called with sched_mutex held */ static int __sched_free(struct ocmem_req *req) { int owner = 
req->owner; int ret = 0; struct ocmem_req *matched_req = NULL; struct ocmem_region *matched_region = NULL; struct ocmem_zone *zone = get_zone(owner); BUG_ON(!zone); matched_region = find_region_match(req->req_start, req->req_end); matched_req = find_req_match(req->req_id, matched_region); if (!matched_region || !matched_req) goto invalid_op_error; if (matched_req != req) goto invalid_op_error; ret = zone->z_ops->free(zone, matched_req->req_start, matched_req->req_sz); if (ret < 0) goto err_op_fail; detach_req(matched_region, matched_req); update_region_prio(matched_region); if (req_count(matched_region) == 0) { remove_region(matched_region); destroy_region(matched_region); } /* Update the request */ req->req_start = 0x0; req->req_sz = 0x0; req->req_end = 0x0; SET_STATE(req, R_FREE); return OP_COMPLETE; invalid_op_error: pr_err("ocmem: free: Failed to find matching region\n"); err_op_fail: pr_err("ocmem: free: Failed\n"); return OP_FAIL; } /* Must be called with sched_mutex held */ static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz) { int owner = req->owner; int ret = 0; struct ocmem_req *matched_req = NULL; struct ocmem_region *matched_region = NULL; struct ocmem_region *region = NULL; unsigned long alloc_addr = 0x0; int rc = 0; struct ocmem_zone *zone = get_zone(owner); BUG_ON(!zone); /* The shrink should not be called for zero size */ BUG_ON(new_sz == 0); matched_region = find_region_match(req->req_start, req->req_end); matched_req = find_req_match(req->req_id, matched_region); if (!matched_region || !matched_req) goto invalid_op_error; if (matched_req != req) goto invalid_op_error; ret = zone->z_ops->free(zone, matched_req->req_start, matched_req->req_sz); if (ret < 0) { pr_err("Zone Allocation operation failed\n"); goto internal_error; } rc = zone->z_ops->allocate(zone, new_sz, &alloc_addr); if (rc) { pr_err("Zone Allocation operation failed\n"); goto internal_error; } /* Detach the region from the interval tree */ /* This is to guarantee 
that the change in size * causes the tree to be rebalanced if required */ detach_req(matched_region, req); if (req_count(matched_region) == 0) { remove_region(matched_region); region = matched_region; } else { region = create_region(); if (!region) { pr_err("ocmem: Unable to create region\n"); goto internal_error; } } /* update the request */ req->req_start = alloc_addr; req->req_sz = new_sz; req->req_end = alloc_addr + req->req_sz; if (req_count(region) == 0) { remove_region(matched_region); destroy_region(matched_region); } /* update request state */ SET_STATE(req, R_MUST_GROW); SET_STATE(req, R_MUST_MAP); req->op = SCHED_MAP; /* attach the request to the region */ attach_req(region, req); populate_region(region, req); update_region_prio(region); /* update the tree with new region */ if (insert_region(region)) { pr_err("ocmem: Failed to insert the region\n"); zone->z_ops->free(zone, alloc_addr, new_sz); detach_req(region, req); update_region_prio(region); /* req will be destroyed by the caller */ goto region_error; } return OP_COMPLETE; region_error: destroy_region(region); internal_error: pr_err("ocmem: shrink: Failed\n"); return OP_FAIL; invalid_op_error: pr_err("ocmem: shrink: Failed to find matching region\n"); return OP_FAIL; } /* Must be called with sched_mutex held */ static int __sched_allocate(struct ocmem_req *req, bool can_block, bool can_wait) { unsigned long min = req->req_min; unsigned long max = req->req_max; unsigned long step = req->req_step; int owner = req->owner; unsigned long sz = max; enum client_prio prio = req->prio; unsigned long alloc_addr = 0x0; bool retry; int rc = 0; struct ocmem_region *spanned_r = NULL; struct ocmem_region *overlap_r = NULL; struct ocmem_zone *zone = get_zone(owner); struct ocmem_region *region = NULL; BUG_ON(!zone); if (min > (zone->z_end - zone->z_start)) { pr_err("ocmem: requested minimum size exceeds quota\n"); goto invalid_op_error; } if (max > (zone->z_end - zone->z_start)) { pr_err("ocmem: requested maximum 
size exceeds quota\n"); goto invalid_op_error; } if (min > zone->z_free) { pr_err("ocmem: out of memory for zone %d\n", owner); goto invalid_op_error; } retry = false; pr_debug("ocmem: do_allocate: %s request %p size %lx\n", get_name(owner), req, sz); retry_next_step: spanned_r = NULL; overlap_r = NULL; spanned_r = find_region(zone->z_head); overlap_r = find_region_intersection(zone->z_head, zone->z_head + sz); if (overlap_r == NULL) { region = create_region(); if (!region) { pr_err("ocmem: Unable to create region\n"); goto invalid_op_error; } /* no conflicting regions, schedule this region */ rc = zone->z_ops->allocate(zone, sz, &alloc_addr); if (rc) { pr_err("Zone Allocation operation failed\n"); goto internal_error; } /* update the request */ req->req_start = alloc_addr; req->req_end = alloc_addr + sz - 1; req->req_sz = sz; req->zone = zone; /* update request state */ CLEAR_STATE(req, R_FREE); CLEAR_STATE(req, R_PENDING); SET_STATE(req, R_ALLOCATED); SET_STATE(req, R_MUST_MAP); req->op = SCHED_NOP; /* attach the request to the region */ attach_req(region, req); populate_region(region, req); update_region_prio(region); /* update the tree with new region */ if (insert_region(region)) { pr_err("ocmem: Failed to insert the region\n"); zone->z_ops->free(zone, alloc_addr, sz); detach_req(region, req); update_region_prio(region); /* req will be destroyed by the caller */ goto internal_error; } if (retry) { SET_STATE(req, R_MUST_GROW); SET_STATE(req, R_PENDING); req->op = SCHED_GROW; return OP_PARTIAL; } } else if (spanned_r != NULL && overlap_r != NULL) { /* resolve conflicting regions based on priority */ if (overlap_r->max_prio < prio) { if (min == max) { req->req_start = zone->z_head; req->req_end = zone->z_head + sz - 1; req->req_sz = 0x0; req->edata = NULL; goto trigger_eviction; } else { /* Try to allocate atleast >= 'min' immediately */ sz -= step; if (sz < min) goto err_out_of_mem; retry = true; pr_debug("ocmem: Attempting with reduced size %lx\n", sz); goto 
retry_next_step; } } else if (overlap_r->max_prio > prio) { if (can_block == true) { SET_STATE(req, R_PENDING); SET_STATE(req, R_MUST_GROW); return OP_RESCHED; } else { if (min == max) { pr_err("Cannot allocate %lx synchronously\n", sz); goto err_out_of_mem; } else { sz -= step; if (sz < min) goto err_out_of_mem; retry = true; pr_debug("ocmem: Attempting reduced size %lx\n", sz); goto retry_next_step; } } } else { pr_err("ocmem: Undetermined behavior\n"); pr_err("ocmem: New Region %p Existing %p\n", region, overlap_r); /* This is serious enough to fail */ BUG(); } } else if (spanned_r == NULL && overlap_r != NULL) goto err_not_supported; return OP_COMPLETE; trigger_eviction: pr_debug("Trigger eviction of region %p\n", overlap_r); return OP_EVICT; err_not_supported: pr_err("ocmem: Scheduled unsupported operation\n"); return OP_FAIL; err_out_of_mem: pr_err("ocmem: Out of memory during allocation\n"); internal_error: destroy_region(region); invalid_op_error: return OP_FAIL; } /* Remove the request from eviction lists */ static void cancel_restore(struct ocmem_req *req) { struct ocmem_eviction_data *edata; if (!req) return; edata = req->eviction_info; if (!edata) return; if (list_empty(&edata->req_list)) return; list_del_init(&req->eviction_list); req->eviction_info = NULL; return; } static int sched_enqueue(struct ocmem_req *priv) { struct ocmem_req *next = NULL; mutex_lock(&sched_queue_mutex); SET_STATE(priv, R_ENQUEUED); list_add_tail(&priv->sched_list, &sched_queue[priv->owner]); pr_debug("enqueued req %p\n", priv); list_for_each_entry(next, &sched_queue[priv->owner], sched_list) { pr_debug("pending request %p for client %s\n", next, get_name(next->owner)); } mutex_unlock(&sched_queue_mutex); return 0; } static void sched_dequeue(struct ocmem_req *victim_req) { struct ocmem_req *req = NULL; struct ocmem_req *next = NULL; int id; if (!victim_req) return; id = victim_req->owner; mutex_lock(&sched_queue_mutex); if (list_empty(&sched_queue[id])) goto dequeue_done; 
list_for_each_entry_safe(req, next, &sched_queue[id], sched_list) { if (req == victim_req) { pr_debug("ocmem: Cancelling pending request %p for %s\n", req, get_name(req->owner)); list_del_init(&victim_req->sched_list); CLEAR_STATE(victim_req, R_ENQUEUED); break; } } dequeue_done: mutex_unlock(&sched_queue_mutex); return; } static struct ocmem_req *ocmem_fetch_req(void) { int i; struct ocmem_req *req = NULL; struct ocmem_req *next = NULL; mutex_lock(&sched_queue_mutex); for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) { if (list_empty(&sched_queue[i])) continue; list_for_each_entry_safe(req, next, &sched_queue[i], sched_list) { if (req) { pr_debug("ocmem: Fetched pending request %p\n", req); list_del(&req->sched_list); CLEAR_STATE(req, R_ENQUEUED); break; } } } mutex_unlock(&sched_queue_mutex); return req; } unsigned long process_quota(int id) { struct ocmem_zone *zone = NULL; if (is_blocked(id)) return 0; zone = get_zone(id); if (zone && zone->z_pool) return zone->z_end - zone->z_start; else return 0; } static int do_grow(struct ocmem_req *req) { struct ocmem_buf *buffer = NULL; bool can_block = true; int rc = 0; down_write(&req->rw_sem); buffer = req->buffer; /* Take the scheduler mutex */ mutex_lock(&sched_mutex); rc = __sched_grow(req, can_block); mutex_unlock(&sched_mutex); if (rc == OP_FAIL) goto err_op_fail; if (rc == OP_RESCHED) { pr_debug("ocmem: Enqueue this allocation"); sched_enqueue(req); } else if (rc == OP_COMPLETE || rc == OP_PARTIAL) { buffer->addr = device_address(req->owner, req->req_start); buffer->len = req->req_sz; } up_write(&req->rw_sem); return 0; err_op_fail: up_write(&req->rw_sem); return -EINVAL; } static int process_grow(struct ocmem_req *req) { int rc = 0; unsigned long offset = 0; /* Attempt to grow the region */ rc = do_grow(req); if (rc < 0) return -EINVAL; rc = ocmem_enable_core_clock(); if (rc < 0) goto core_clock_fail; if (is_iface_access(req->owner)) { rc = ocmem_enable_iface_clock(); if (rc < 0) goto iface_clock_fail; } rc = 
process_map(req, req->req_start, req->req_end); if (rc < 0) goto map_error; offset = phys_to_offset(req->req_start); rc = ocmem_memory_on(req->owner, offset, req->req_sz); if (rc < 0) { pr_err("Failed to switch ON memory macros\n"); goto power_ctl_error; } /* Notify the client about the buffer growth */ rc = dispatch_notification(req->owner, OCMEM_ALLOC_GROW, req->buffer); if (rc < 0) { pr_err("No notifier callback to cater for req %p event: %d\n", req, OCMEM_ALLOC_GROW); BUG(); } return 0; power_ctl_error: map_error: if (is_iface_access(req->owner)) ocmem_disable_iface_clock(); iface_clock_fail: ocmem_disable_core_clock(); core_clock_fail: return -EINVAL; } static int do_shrink(struct ocmem_req *req, unsigned long shrink_size) { int rc = 0; struct ocmem_buf *buffer = NULL; down_write(&req->rw_sem); buffer = req->buffer; /* Take the scheduler mutex */ mutex_lock(&sched_mutex); rc = __sched_shrink(req, shrink_size); mutex_unlock(&sched_mutex); if (rc == OP_FAIL) goto err_op_fail; else if (rc == OP_COMPLETE) { buffer->addr = device_address(req->owner, req->req_start); buffer->len = req->req_sz; } up_write(&req->rw_sem); return 0; err_op_fail: up_write(&req->rw_sem); return -EINVAL; } static void ocmem_sched_wk_func(struct work_struct *work); DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func); static int ocmem_schedule_pending(void) { bool need_sched = false; int i = 0; for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) { if (!list_empty(&sched_queue[i])) { need_sched = true; break; } } if (need_sched == true) { cancel_delayed_work(&ocmem_sched_thread); schedule_delayed_work(&ocmem_sched_thread, msecs_to_jiffies(SCHED_DELAY)); pr_debug("ocmem: Scheduled delayed work\n"); } return 0; } static int do_free(struct ocmem_req *req) { int rc = 0; struct ocmem_buf *buffer = req->buffer; down_write(&req->rw_sem); if (is_mapped(req)) { pr_err("ocmem: Buffer needs to be unmapped before free\n"); goto err_free_fail; } pr_debug("ocmem: do_free: client %s req %p\n", 
get_name(req->owner), req); /* Grab the sched mutex */ mutex_lock(&sched_mutex); rc = __sched_free(req); mutex_unlock(&sched_mutex); switch (rc) { case OP_COMPLETE: buffer->addr = 0x0; buffer->len = 0x0; break; case OP_FAIL: default: goto err_free_fail; break; } up_write(&req->rw_sem); return 0; err_free_fail: up_write(&req->rw_sem); pr_err("ocmem: freeing req %p failed\n", req); return -EINVAL; } int process_free(int id, struct ocmem_handle *handle) { struct ocmem_req *req = NULL; struct ocmem_buf *buffer = NULL; unsigned long offset = 0; int rc = 0; mutex_lock(&free_mutex); if (is_blocked(id)) { pr_err("Client %d cannot request free\n", id); goto free_invalid; } req = handle_to_req(handle); buffer = handle_to_buffer(handle); if (!req) { pr_err("ocmem: No valid request to free\n"); goto free_invalid; } if (req->req_start != core_address(id, buffer->addr)) { pr_err("Invalid buffer handle passed for free\n"); goto free_invalid; } if (req->edata != NULL) { pr_err("ocmem: Request %p(%2lx) yet to process eviction %p\n", req, req->state, req->edata); goto free_invalid; } if (is_pending_shrink(req)) { pr_err("ocmem: Request %p(%2lx) yet to process eviction\n", req, req->state); goto pending_shrink; } /* Remove the request from any restore lists */ if (req->eviction_info) cancel_restore(req); /* Remove the request from any pending opreations */ if (TEST_STATE(req, R_ENQUEUED)) { mutex_lock(&sched_mutex); sched_dequeue(req); mutex_unlock(&sched_mutex); } if (!TEST_STATE(req, R_FREE)) { if (TEST_STATE(req, R_MAPPED)) { /* unmap the interval and clear the memory */ rc = process_unmap(req, req->req_start, req->req_end); if (rc < 0) { pr_err("ocmem: Failed to unmap %p\n", req); goto free_fail; } /* Turn off the memory */ if (req->req_sz != 0) { offset = phys_to_offset(req->req_start); rc = ocmem_memory_off(req->owner, offset, req->req_sz); if (rc < 0) { pr_err("Failed to switch OFF memory macros\n"); goto free_fail; } } if (is_iface_access(req->owner)) 
ocmem_disable_iface_clock(); ocmem_disable_core_clock(); rc = do_free(req); if (rc < 0) { pr_err("ocmem: Failed to free %p\n", req); goto free_fail; } } else pr_debug("request %p was already shrunk to 0\n", req); } if (!TEST_STATE(req, R_FREE)) { /* Turn off the memory */ if (req->req_sz != 0) { offset = phys_to_offset(req->req_start); rc = ocmem_memory_off(req->owner, offset, req->req_sz); if (rc < 0) { pr_err("Failed to switch OFF memory macros\n"); goto free_fail; } if (is_iface_access(req->owner)) ocmem_disable_iface_clock(); ocmem_disable_core_clock(); } /* free the allocation */ rc = do_free(req); if (rc < 0) return -EINVAL; } inc_ocmem_stat(zone_of(req), NR_FREES); ocmem_destroy_req(req); handle->req = NULL; ocmem_schedule_pending(); mutex_unlock(&free_mutex); return 0; free_fail: free_invalid: mutex_unlock(&free_mutex); return -EINVAL; pending_shrink: mutex_unlock(&free_mutex); return -EAGAIN; } static void ocmem_rdm_worker(struct work_struct *work) { int offset = 0; int rc = 0; int event; struct ocmem_rdm_work *work_data = container_of(work, struct ocmem_rdm_work, work); int id = work_data->id; struct ocmem_map_list *list = work_data->list; int direction = work_data->direction; struct ocmem_handle *handle = work_data->handle; struct ocmem_req *req = handle_to_req(handle); struct ocmem_buf *buffer = handle_to_buffer(handle); down_write(&req->rw_sem); offset = phys_to_offset(req->req_start); rc = ocmem_rdm_transfer(id, list, offset, direction); if (work_data->direction == TO_OCMEM) event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL; else event = (rc == 0) ? 
OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL; up_write(&req->rw_sem); kfree(work_data); dispatch_notification(id, event, buffer); } int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle, struct ocmem_map_list *list, int direction) { struct ocmem_rdm_work *work_data = NULL; down_write(&req->rw_sem); work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC); if (!work_data) BUG(); work_data->handle = handle; work_data->list = list; work_data->id = req->owner; work_data->direction = direction; INIT_WORK(&work_data->work, ocmem_rdm_worker); up_write(&req->rw_sem); queue_work(ocmem_rdm_wq, &work_data->work); return 0; } int process_drop(int id, struct ocmem_handle *handle, struct ocmem_map_list *list) { struct ocmem_req *req = NULL; struct ocmem_buf *buffer = NULL; int rc = 0; if (is_blocked(id)) { pr_err("Client %d cannot request drop\n", id); return -EINVAL; } if (is_tcm(id)) pr_err("Client %d cannot request drop\n", id); req = handle_to_req(handle); buffer = handle_to_buffer(handle); if (!req) return -EINVAL; if (req->req_start != core_address(id, buffer->addr)) { pr_err("Invalid buffer handle passed for drop\n"); return -EINVAL; } if (TEST_STATE(req, R_MAPPED)) { rc = process_unmap(req, req->req_start, req->req_end); if (rc < 0) return -EINVAL; if (is_iface_access(req->owner)) ocmem_disable_iface_clock(); ocmem_disable_core_clock(); } else return -EINVAL; return 0; } int process_xfer_out(int id, struct ocmem_handle *handle, struct ocmem_map_list *list) { struct ocmem_req *req = NULL; int rc = 0; req = handle_to_req(handle); if (!req) return -EINVAL; if (!is_mapped(req)) { pr_err("Buffer is not currently mapped\n"); goto transfer_out_error; } rc = queue_transfer(req, handle, list, TO_DDR); if (rc < 0) { pr_err("Failed to queue rdm transfer to DDR\n"); inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS); goto transfer_out_error; } inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_DDR); return 0; transfer_out_error: return -EINVAL; } int process_xfer_in(int id, 
struct ocmem_handle *handle, struct ocmem_map_list *list) { struct ocmem_req *req = NULL; int rc = 0; req = handle_to_req(handle); if (!req) return -EINVAL; if (!is_mapped(req)) { pr_err("Buffer is not already mapped for transfer\n"); goto transfer_in_error; } inc_ocmem_stat(zone_of(req), NR_TRANSFERS_TO_OCMEM); rc = queue_transfer(req, handle, list, TO_OCMEM); if (rc < 0) { pr_err("Failed to queue rdm transfer to OCMEM\n"); inc_ocmem_stat(zone_of(req), NR_TRANSFER_FAILS); goto transfer_in_error; } return 0; transfer_in_error: return -EINVAL; } int process_shrink(int id, struct ocmem_handle *handle, unsigned long size) { struct ocmem_req *req = NULL; struct ocmem_buf *buffer = NULL; struct ocmem_eviction_data *edata = NULL; int rc = 0; if (is_blocked(id)) { pr_err("Client %d cannot request free\n", id); return -EINVAL; } req = handle_to_req(handle); buffer = handle_to_buffer(handle); if (!req) return -EINVAL; mutex_lock(&free_mutex); if (req->req_start != core_address(id, buffer->addr)) { pr_err("Invalid buffer handle passed for shrink\n"); goto shrink_fail; } edata = req->eviction_info; if (!edata) { pr_err("Unable to find eviction data\n"); goto shrink_fail; } pr_debug("Found edata %p in request %p\n", edata, req); inc_ocmem_stat(zone_of(req), NR_SHRINKS); if (size == 0) { pr_debug("req %p being shrunk to zero\n", req); if (is_mapped(req)) { rc = process_unmap(req, req->req_start, req->req_end); if (rc < 0) goto shrink_fail; if (is_iface_access(req->owner)) ocmem_disable_iface_clock(); ocmem_disable_core_clock(); } rc = do_free(req); if (rc < 0) goto shrink_fail; SET_STATE(req, R_FREE); } else { rc = do_shrink(req, size); if (rc < 0) goto shrink_fail; } CLEAR_STATE(req, R_ALLOCATED); CLEAR_STATE(req, R_WF_SHRINK); SET_STATE(req, R_SHRUNK); if (atomic_dec_and_test(&edata->pending)) { pr_debug("ocmem: All conflicting allocations were shrunk\n"); complete(&edata->completion); } mutex_unlock(&free_mutex); return 0; shrink_fail: pr_err("ocmem: Failed to shrink request 
%p of %s\n", req, get_name(req->owner)); mutex_unlock(&free_mutex); return -EINVAL; } int process_xfer(int id, struct ocmem_handle *handle, struct ocmem_map_list *list, int direction) { int rc = 0; if (is_tcm(id)) { WARN(1, "Mapping operation is invalid for client\n"); return -EINVAL; } if (direction == TO_DDR) rc = process_xfer_out(id, handle, list); else if (direction == TO_OCMEM) rc = process_xfer_in(id, handle, list); return rc; } static struct ocmem_eviction_data *init_eviction(int id) { struct ocmem_eviction_data *edata = NULL; int prio = ocmem_client_table[id].priority; edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC); if (!edata) { pr_err("ocmem: Could not allocate eviction data\n"); return NULL; } INIT_LIST_HEAD(&edata->victim_list); INIT_LIST_HEAD(&edata->req_list); edata->prio = prio; atomic_set(&edata->pending, 0); return edata; } static void free_eviction(struct ocmem_eviction_data *edata) { if (!edata) return; if (!list_empty(&edata->req_list)) pr_err("ocmem: Eviction data %p not empty\n", edata); kfree(edata); edata = NULL; } static bool is_overlapping(struct ocmem_req *new, struct ocmem_req *old) { if (!new || !old) return false; pr_debug("check overlap [%lx -- %lx] on [%lx -- %lx]\n", new->req_start, new->req_end, old->req_start, old->req_end); if ((new->req_start < old->req_start && new->req_end >= old->req_start) || (new->req_start >= old->req_start && new->req_start <= old->req_end && new->req_end >= old->req_end)) { pr_debug("request %p overlaps with existing req %p\n", new, old); return true; } return false; } static int __evict_common(struct ocmem_eviction_data *edata, struct ocmem_req *req) { struct rb_node *rb_node = NULL; struct ocmem_req *e_req = NULL; bool needs_eviction = false; int j = 0; for (rb_node = rb_first(&sched_tree); rb_node; rb_node = rb_next(rb_node)) { struct ocmem_region *tmp_region = NULL; tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb); if (tmp_region->max_prio < edata->prio) { for (j = 
edata->prio - 1; j > NO_PRIO; j--) { needs_eviction = false; e_req = find_req_match(j, tmp_region); if (!e_req) continue; if (edata->passive == true) { needs_eviction = true; } else { needs_eviction = is_overlapping(req, e_req); } if (needs_eviction) { pr_debug("adding %p in region %p to eviction list\n", e_req, tmp_region); SET_STATE(e_req, R_MUST_SHRINK); list_add_tail( &e_req->eviction_list, &edata->req_list); atomic_inc(&edata->pending); e_req->eviction_info = edata; } } } else { pr_debug("Skipped region %p\n", tmp_region); } } pr_debug("%d requests will be evicted\n", atomic_read(&edata->pending)); return atomic_read(&edata->pending); } static void trigger_eviction(struct ocmem_eviction_data *edata) { struct ocmem_req *req = NULL; struct ocmem_req *next = NULL; struct ocmem_buf buffer; if (!edata) return; BUG_ON(atomic_read(&edata->pending) == 0); init_completion(&edata->completion); list_for_each_entry_safe(req, next, &edata->req_list, eviction_list) { if (req) { pr_debug("ocmem: Evicting request %p\n", req); buffer.addr = req->req_start; buffer.len = 0x0; CLEAR_STATE(req, R_MUST_SHRINK); dispatch_notification(req->owner, OCMEM_ALLOC_SHRINK, &buffer); SET_STATE(req, R_WF_SHRINK); } } return; } int process_evict(int id) { struct ocmem_eviction_data *edata = NULL; int rc = 0; edata = init_eviction(id); if (!edata) return -EINVAL; edata->passive = true; mutex_lock(&sched_mutex); rc = __evict_common(edata, NULL); if (rc == 0) goto skip_eviction; trigger_eviction(edata); evictions[id] = edata; mutex_unlock(&sched_mutex); wait_for_completion(&edata->completion); return 0; skip_eviction: evictions[id] = NULL; mutex_unlock(&sched_mutex); return 0; } static int run_evict(struct ocmem_req *req) { struct ocmem_eviction_data *edata = NULL; int rc = 0; if (!req) return -EINVAL; edata = init_eviction(req->owner); if (!edata) return -EINVAL; edata->passive = false; mutex_lock(&free_mutex); rc = __evict_common(edata, req); if (rc == 0) goto skip_eviction; 
trigger_eviction(edata); pr_debug("ocmem: attaching eviction %p to request %p", edata, req); req->edata = edata; mutex_unlock(&free_mutex); wait_for_completion(&edata->completion); pr_debug("ocmem: eviction completed successfully\n"); return 0; skip_eviction: pr_err("ocmem: Unable to run eviction\n"); free_eviction(edata); req->edata = NULL; mutex_unlock(&free_mutex); return 0; } static int __restore_common(struct ocmem_eviction_data *edata) { struct ocmem_req *req = NULL; if (!edata) return -EINVAL; while (!list_empty(&edata->req_list)) { req = list_first_entry(&edata->req_list, struct ocmem_req, eviction_list); list_del_init(&req->eviction_list); pr_debug("ocmem: restoring evicted request %p\n", req); req->edata = NULL; req->eviction_info = NULL; req->op = SCHED_ALLOCATE; inc_ocmem_stat(zone_of(req), NR_RESTORES); sched_enqueue(req); } pr_debug("Scheduled all evicted regions\n"); return 0; } static int sched_restore(struct ocmem_req *req) { int rc = 0; if (!req) return -EINVAL; if (!req->edata) return 0; mutex_lock(&free_mutex); rc = __restore_common(req->edata); mutex_unlock(&free_mutex); if (rc < 0) return -EINVAL; free_eviction(req->edata); req->edata = NULL; return 0; } int process_restore(int id) { struct ocmem_eviction_data *edata = evictions[id]; int rc = 0; if (!edata) { pr_err("Client %s invoked restore without any eviction\n", get_name(id)); return -EINVAL; } mutex_lock(&free_mutex); rc = __restore_common(edata); mutex_unlock(&free_mutex); if (rc < 0) { pr_err("Failed to restore evicted requests\n"); return -EINVAL; } free_eviction(edata); evictions[id] = NULL; ocmem_schedule_pending(); return 0; } static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait) { int rc = 0; int ret = 0; struct ocmem_buf *buffer = req->buffer; down_write(&req->rw_sem); retry_allocate: /* Take the scheduler mutex */ mutex_lock(&sched_mutex); rc = __sched_allocate(req, can_block, can_wait); mutex_unlock(&sched_mutex); if (rc == OP_EVICT) { 
mutex_lock(&allocation_mutex); ret = run_evict(req); if (ret == 0) { rc = sched_restore(req); if (rc < 0) { pr_err("Failed to restore for req %p\n", req); mutex_unlock(&allocation_mutex); goto err_allocate_fail; } req->edata = NULL; pr_debug("Attempting to re-allocate req %p\n", req); req->req_start = 0x0; req->req_end = 0x0; mutex_unlock(&allocation_mutex); goto retry_allocate; } else { mutex_unlock(&allocation_mutex); goto err_allocate_fail; } } if (rc == OP_FAIL) { inc_ocmem_stat(zone_of(req), NR_ALLOCATION_FAILS); goto err_allocate_fail; } if (rc == OP_RESCHED) { buffer->addr = 0x0; buffer->len = 0x0; pr_debug("ocmem: Enqueuing req %p\n", req); sched_enqueue(req); } else if (rc == OP_PARTIAL) { buffer->addr = device_address(req->owner, req->req_start); buffer->len = req->req_sz; inc_ocmem_stat(zone_of(req), NR_RANGE_ALLOCATIONS); pr_debug("ocmem: Enqueuing req %p\n", req); sched_enqueue(req); } else if (rc == OP_COMPLETE) { buffer->addr = device_address(req->owner, req->req_start); buffer->len = req->req_sz; } up_write(&req->rw_sem); return 0; err_allocate_fail: up_write(&req->rw_sem); return -EINVAL; } static int do_dump(struct ocmem_req *req, unsigned long addr) { void __iomem *req_vaddr; unsigned long offset = 0x0; int rc = 0; down_write(&req->rw_sem); offset = phys_to_offset(req->req_start); req_vaddr = ocmem_vaddr + offset; if (!req_vaddr) goto err_do_dump; rc = ocmem_enable_dump(req->owner, offset, req->req_sz); if (rc < 0) goto err_do_dump; pr_debug("Dumping client %s buffer ocmem p: %lx (v: %p) to ddr %lx\n", get_name(req->owner), req->req_start, req_vaddr, addr); memcpy((void *)addr, req_vaddr, req->req_sz); rc = ocmem_disable_dump(req->owner, offset, req->req_sz); if (rc < 0) pr_err("Failed to secure request %p of %s after dump\n", req, get_name(req->owner)); up_write(&req->rw_sem); return 0; err_do_dump: up_write(&req->rw_sem); return -EINVAL; } int process_allocate(int id, struct ocmem_handle *handle, unsigned long min, unsigned long max, unsigned 
long step, bool can_block, bool can_wait) { struct ocmem_req *req = NULL; struct ocmem_buf *buffer = NULL; int rc = 0; unsigned long offset = 0; /* sanity checks */ if (is_blocked(id)) { pr_err("Client %d cannot request allocation\n", id); return -EINVAL; } if (handle->req != NULL) { pr_err("Invalid handle passed in\n"); return -EINVAL; } buffer = handle_to_buffer(handle); BUG_ON(buffer == NULL); /* prepare a request structure to represent this transaction */ req = ocmem_create_req(); if (!req) return -ENOMEM; req->owner = id; req->req_min = min; req->req_max = max; req->req_step = step; req->prio = ocmem_client_table[id].priority; req->op = SCHED_ALLOCATE; req->buffer = buffer; inc_ocmem_stat(zone_of(req), NR_REQUESTS); rc = do_allocate(req, can_block, can_wait); if (rc < 0) goto do_allocate_error; inc_ocmem_stat(zone_of(req), NR_SYNC_ALLOCATIONS); handle->req = req; if (req->req_sz != 0) { rc = ocmem_enable_core_clock(); if (rc < 0) goto core_clock_fail; if (is_iface_access(req->owner)) { rc = ocmem_enable_iface_clock(); if (rc < 0) goto iface_clock_fail; } rc = process_map(req, req->req_start, req->req_end); if (rc < 0) goto map_error; offset = phys_to_offset(req->req_start); rc = ocmem_memory_on(req->owner, offset, req->req_sz); if (rc < 0) { pr_err("Failed to switch ON memory macros\n"); goto power_ctl_error; } } return 0; power_ctl_error: process_unmap(req, req->req_start, req->req_end); map_error: handle->req = NULL; do_free(req); if (is_iface_access(req->owner)) ocmem_disable_iface_clock(); iface_clock_fail: ocmem_disable_core_clock(); core_clock_fail: do_allocate_error: ocmem_destroy_req(req); return -EINVAL; } int process_delayed_allocate(struct ocmem_req *req) { struct ocmem_handle *handle = NULL; int rc = 0; int id = req->owner; unsigned long offset = 0; handle = req_to_handle(req); BUG_ON(handle == NULL); rc = do_allocate(req, true, false); if (rc < 0) goto do_allocate_error; /* The request can still be pending */ if (TEST_STATE(req, R_PENDING)) return 
0; inc_ocmem_stat(zone_of(req), NR_ASYNC_ALLOCATIONS); if (req->req_sz != 0) { rc = ocmem_enable_core_clock(); if (rc < 0) goto core_clock_fail; if (is_iface_access(req->owner)) { rc = ocmem_enable_iface_clock(); if (rc < 0) goto iface_clock_fail; } rc = process_map(req, req->req_start, req->req_end); if (rc < 0) goto map_error; offset = phys_to_offset(req->req_start); rc = ocmem_memory_on(req->owner, offset, req->req_sz); if (rc < 0) { pr_err("Failed to switch ON memory macros\n"); goto power_ctl_error; } } /* Notify the client about the buffer growth */ rc = dispatch_notification(id, OCMEM_ALLOC_GROW, req->buffer); if (rc < 0) { pr_err("No notifier callback to cater for req %p event: %d\n", req, OCMEM_ALLOC_GROW); BUG(); } return 0; power_ctl_error: process_unmap(req, req->req_start, req->req_end); map_error: handle->req = NULL; do_free(req); if (is_iface_access(req->owner)) ocmem_disable_iface_clock(); iface_clock_fail: ocmem_disable_core_clock(); core_clock_fail: do_allocate_error: ocmem_destroy_req(req); return -EINVAL; } int process_dump(int id, struct ocmem_handle *handle, unsigned long addr) { struct ocmem_req *req = NULL; int rc = 0; req = handle_to_req(handle); if (!req) return -EINVAL; if (!is_mapped(req)) { pr_err("Buffer is not mapped\n"); goto dump_error; } inc_ocmem_stat(zone_of(req), NR_DUMP_REQUESTS); mutex_lock(&sched_mutex); rc = do_dump(req, addr); mutex_unlock(&sched_mutex); if (rc < 0) goto dump_error; inc_ocmem_stat(zone_of(req), NR_DUMP_COMPLETE); return 0; dump_error: pr_err("Dumping OCMEM memory failed for client %d\n", id); return -EINVAL; } static void ocmem_sched_wk_func(struct work_struct *work) { struct ocmem_buf *buffer = NULL; struct ocmem_handle *handle = NULL; struct ocmem_req *req = ocmem_fetch_req(); if (!req) { pr_debug("No Pending Requests found\n"); return; } pr_debug("ocmem: sched_wk pending req %p\n", req); handle = req_to_handle(req); buffer = handle_to_buffer(handle); BUG_ON(req->op == SCHED_NOP); switch (req->op) { case 
SCHED_GROW: process_grow(req); break; case SCHED_ALLOCATE: process_delayed_allocate(req); break; default: pr_err("ocmem: Unknown operation encountered\n"); break; } return; } static int ocmem_allocations_show(struct seq_file *f, void *dummy) { struct rb_node *rb_node = NULL; struct ocmem_req *req = NULL; unsigned j; mutex_lock(&sched_mutex); for (rb_node = rb_first(&sched_tree); rb_node; rb_node = rb_next(rb_node)) { struct ocmem_region *tmp_region = NULL; tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb); for (j = MAX_OCMEM_PRIO - 1; j > NO_PRIO; j--) { req = find_req_match(j, tmp_region); if (req) { seq_printf(f, "owner: %s 0x%lx -- 0x%lx size 0x%lx [state: %2lx]\n", get_name(req->owner), req->req_start, req->req_end, req->req_sz, req->state); } } } mutex_unlock(&sched_mutex); return 0; } static int ocmem_allocations_open(struct inode *inode, struct file *file) { return single_open(file, ocmem_allocations_show, inode->i_private); } static const struct file_operations allocations_show_fops = { .open = ocmem_allocations_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; int ocmem_sched_init(struct platform_device *pdev) { int i = 0; struct ocmem_plat_data *pdata = NULL; struct device *dev = &pdev->dev; sched_tree = RB_ROOT; pdata = platform_get_drvdata(pdev); mutex_init(&allocation_mutex); mutex_init(&free_mutex); mutex_init(&sched_mutex); mutex_init(&sched_queue_mutex); ocmem_vaddr = pdata->vbase; for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++) INIT_LIST_HEAD(&sched_queue[i]); mutex_init(&rdm_mutex); INIT_LIST_HEAD(&rdm_queue); ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0); if (!ocmem_rdm_wq) return -ENOMEM; ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0); if (!ocmem_eviction_wq) return -ENOMEM; if (!debugfs_create_file("allocations", S_IRUGO, pdata->debug_node, NULL, &allocations_show_fops)) { dev_err(dev, "Unable to create debugfs node for scheduler\n"); return -EBUSY; } return 0; }
gpl-2.0
ratnamanoj/kernel-4.0.4
lib/kstrtox.c
1411
9650
/*
 * Convert integer string representation to an integer.
 * If an integer doesn't fit into specified type, -E is returned.
 *
 * Integer starts with optional sign.
 * kstrtou*() functions do not accept sign "-".
 *
 * Radix 0 means autodetection: leading "0x" implies radix 16,
 * leading "0" implies radix 8, otherwise radix is 10.
 * Autodetection hints work after optional sign, but not before.
 *
 * If -E is returned, result is not touched.
 */
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/types.h>
#include <asm/uaccess.h>
#include "kstrtox.h"

/*
 * Resolve radix autodetection ("0x" -> 16, leading "0" -> 8, else 10)
 * and skip a "0x"/"0X" prefix when parsing base 16.  Returns a pointer
 * to the first digit of the number.
 */
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
{
	if (*base == 0) {
		if (s[0] == '0') {
			/* "0x" only selects hex if a real hex digit follows. */
			if (_tolower(s[1]) == 'x' && isxdigit(s[2]))
				*base = 16;
			else
				*base = 8;
		} else
			*base = 10;
	}
	if (*base == 16 && s[0] == '0' && _tolower(s[1]) == 'x')
		s += 2;
	return s;
}

/*
 * Convert non-negative integer string representation in explicitly given radix
 * to an integer.
 * Return number of characters consumed maybe or-ed with overflow bit.
 * If overflow occurs, result integer (incorrect) is still returned.
 *
 * Don't you dare use this function.
 */
unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
{
	unsigned long long res;
	unsigned int rv;
	int overflow;

	res = 0;
	rv = 0;
	overflow = 0;
	while (*s) {
		unsigned int val;

		if ('0' <= *s && *s <= '9')
			val = *s - '0';
		else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
			val = _tolower(*s) - 'a' + 10;
		else
			break;

		/* Digit not valid in this radix terminates the number. */
		if (val >= base)
			break;
		/*
		 * Check for overflow only if we are within range of
		 * it in the max base we support (16)
		 */
		if (unlikely(res & (~0ull << 60))) {
			if (res > div_u64(ULLONG_MAX - val, base))
				overflow = 1;
		}
		res = res * base + val;
		rv++;
		s++;
	}
	*p = res;
	if (overflow)
		rv |= KSTRTOX_OVERFLOW;
	return rv;
}

/*
 * Core unsigned conversion: fix up the radix, parse the digits, then
 * require that nothing but an optional trailing newline remains.
 */
static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
	unsigned long long _res;
	unsigned int rv;

	s = _parse_integer_fixup_radix(s, &base);
	rv = _parse_integer(s, base, &_res);
	if (rv & KSTRTOX_OVERFLOW)
		return -ERANGE;
	if (rv == 0)
		return -EINVAL;
	s += rv;
	/* Accept one trailing newline (convenient for sysfs writes). */
	if (*s == '\n')
		s++;
	if (*s)
		return -EINVAL;
	*res = _res;
	return 0;
}

/**
 * kstrtoull - convert a string to an unsigned long long
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign, but not a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Used as a replacement for the obsolete simple_strtoull. Return code must
 * be checked.
 */
int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
	if (s[0] == '+')
		s++;
	return _kstrtoull(s, base, res);
}
EXPORT_SYMBOL(kstrtoull);

/**
 * kstrtoll - convert a string to a long long
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign or a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Used as a replacement for the obsolete simple_strtoull. Return code must
 * be checked.
 */
int kstrtoll(const char *s, unsigned int base, long long *res)
{
	unsigned long long tmp;
	int rv;

	if (s[0] == '-') {
		rv = _kstrtoull(s + 1, base, &tmp);
		if (rv < 0)
			return rv;
		/* Magnitude too large to negate into a long long. */
		if ((long long)(-tmp) >= 0)
			return -ERANGE;
		*res = -tmp;
	} else {
		rv = kstrtoull(s, base, &tmp);
		if (rv < 0)
			return rv;
		/* Positive value exceeds LLONG_MAX. */
		if ((long long)tmp < 0)
			return -ERANGE;
		*res = tmp;
	}
	return 0;
}
EXPORT_SYMBOL(kstrtoll);

/* Internal, do not use. */
int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
	unsigned long long tmp;
	int rv;

	rv = kstrtoull(s, base, &tmp);
	if (rv < 0)
		return rv;
	/* Round-trip cast detects values wider than unsigned long. */
	if (tmp != (unsigned long long)(unsigned long)tmp)
		return -ERANGE;
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(_kstrtoul);

/* Internal, do not use. */
int _kstrtol(const char *s, unsigned int base, long *res)
{
	long long tmp;
	int rv;

	rv = kstrtoll(s, base, &tmp);
	if (rv < 0)
		return rv;
	if (tmp != (long long)(long)tmp)
		return -ERANGE;
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(_kstrtol);

/**
 * kstrtouint - convert a string to an unsigned int
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign, but not a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Used as a replacement for the obsolete simple_strtoull. Return code must
 * be checked.
 */
int kstrtouint(const char *s, unsigned int base, unsigned int *res)
{
	unsigned long long tmp;
	int rv;

	rv = kstrtoull(s, base, &tmp);
	if (rv < 0)
		return rv;
	if (tmp != (unsigned long long)(unsigned int)tmp)
		return -ERANGE;
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtouint);

/**
 * kstrtoint - convert a string to an int
 * @s: The start of the string. The string must be null-terminated, and may also
 *  include a single newline before its terminating null. The first character
 *  may also be a plus sign or a minus sign.
 * @base: The number base to use. The maximum supported base is 16. If base is
 *  given as 0, then the base of the string is automatically detected with the
 *  conventional semantics - If it begins with 0x the number will be parsed as a
 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
 * @res: Where to write the result of the conversion on success.
 *
 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
 * Used as a replacement for the obsolete simple_strtoull. Return code must
 * be checked.
 */
int kstrtoint(const char *s, unsigned int base, int *res)
{
	long long tmp;
	int rv;

	rv = kstrtoll(s, base, &tmp);
	if (rv < 0)
		return rv;
	if (tmp != (long long)(int)tmp)
		return -ERANGE;
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtoint);

/* Range-checked conversion to u16; -ERANGE if the value does not fit. */
int kstrtou16(const char *s, unsigned int base, u16 *res)
{
	unsigned long long tmp;
	int rv;

	rv = kstrtoull(s, base, &tmp);
	if (rv < 0)
		return rv;
	if (tmp != (unsigned long long)(u16)tmp)
		return -ERANGE;
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtou16);

/* Range-checked conversion to s16; -ERANGE if the value does not fit. */
int kstrtos16(const char *s, unsigned int base, s16 *res)
{
	long long tmp;
	int rv;

	rv = kstrtoll(s, base, &tmp);
	if (rv < 0)
		return rv;
	if (tmp != (long long)(s16)tmp)
		return -ERANGE;
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtos16);

/* Range-checked conversion to u8; -ERANGE if the value does not fit. */
int kstrtou8(const char *s, unsigned int base, u8 *res)
{
	unsigned long long tmp;
	int rv;

	rv = kstrtoull(s, base, &tmp);
	if (rv < 0)
		return rv;
	if (tmp != (unsigned long long)(u8)tmp)
		return -ERANGE;
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtou8);

/* Range-checked conversion to s8; -ERANGE if the value does not fit. */
int kstrtos8(const char *s, unsigned int base, s8 *res)
{
	long long tmp;
	int rv;

	rv = kstrtoll(s, base, &tmp);
	if (rv < 0)
		return rv;
	if (tmp != (long long)(s8)tmp)
		return -ERANGE;
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtos8);

/*
 * Generate the *_from_user() wrappers: bounded copy from userspace into a
 * worst-case stack buffer, NUL-terminate, then reuse the kernel-side parser.
 */
#define kstrto_from_user(f, g, type)					\
int f(const char __user *s, size_t count, unsigned int base, type *res)	\
{									\
	/* sign, base 2 representation, newline, terminator */		\
	char buf[1 + sizeof(type) * 8 + 1 + 1];				\
									\
	count = min(count, sizeof(buf) - 1);				\
	if (copy_from_user(buf, s, count))				\
		return -EFAULT;						\
	buf[count] = '\0';						\
	return g(buf, base, res);					\
}									\
EXPORT_SYMBOL(f)

kstrto_from_user(kstrtoull_from_user,	kstrtoull,	unsigned long long);
kstrto_from_user(kstrtoll_from_user,	kstrtoll,	long long);
kstrto_from_user(kstrtoul_from_user,	kstrtoul,	unsigned long);
kstrto_from_user(kstrtol_from_user,	kstrtol,	long);
kstrto_from_user(kstrtouint_from_user,	kstrtouint,	unsigned int);
kstrto_from_user(kstrtoint_from_user,	kstrtoint,	int);
kstrto_from_user(kstrtou16_from_user,	kstrtou16,	u16);
kstrto_from_user(kstrtos16_from_user,	kstrtos16,	s16);
kstrto_from_user(kstrtou8_from_user,	kstrtou8,	u8);
kstrto_from_user(kstrtos8_from_user,	kstrtos8,	s8);
gpl-2.0
AndroidOpenSourceXperia/android_kernel_sony_u8500
arch/sparc/mm/tlb.c
1667
2451
/* arch/sparc64/mm/tlb.c * * Copyright (C) 2004 David S. Miller <davem@redhat.com> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/preempt.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> #include <asm/tlb.h> /* Heavily inspired by the ppc64 code. */ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); struct mm_struct *mm = tb->mm; if (!tb->tlb_nr) goto out; flush_tsb_user(tb); if (CTX_VALID(mm->context)) { if (tb->tlb_nr == 1) { global_flush_tlb_page(mm, tb->vaddrs[0]); } else { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); #else __flush_tlb_pending(CTX_HWBITS(tb->mm->context), tb->tlb_nr, &tb->vaddrs[0]); #endif } } tb->tlb_nr = 0; out: put_cpu_var(tlb_batch); } void arch_enter_lazy_mmu_mode(void) { struct tlb_batch *tb = &__get_cpu_var(tlb_batch); tb->active = 1; } void arch_leave_lazy_mmu_mode(void) { struct tlb_batch *tb = &__get_cpu_var(tlb_batch); if (tb->tlb_nr) flush_tlb_pending(); tb->active = 0; } void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig, int fullmm) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); unsigned long nr; vaddr &= PAGE_MASK; if (pte_exec(orig)) vaddr |= 0x1UL; if (tlb_type != hypervisor && pte_dirty(orig)) { unsigned long paddr, pfn = pte_pfn(orig); struct address_space *mapping; struct page *page; if (!pfn_valid(pfn)) goto no_cache_flush; page = pfn_to_page(pfn); if (PageReserved(page)) goto no_cache_flush; /* A real file page? 
*/ mapping = page_mapping(page); if (!mapping) goto no_cache_flush; paddr = (unsigned long) page_address(page); if ((paddr ^ vaddr) & (1 << 13)) flush_dcache_page_all(mm, page); } no_cache_flush: if (fullmm) { put_cpu_var(tlb_batch); return; } nr = tb->tlb_nr; if (unlikely(nr != 0 && mm != tb->mm)) { flush_tlb_pending(); nr = 0; } if (!tb->active) { flush_tsb_user_page(mm, vaddr); global_flush_tlb_page(mm, vaddr); goto out; } if (nr == 0) tb->mm = mm; tb->vaddrs[nr] = vaddr; tb->tlb_nr = ++nr; if (nr >= TLB_BATCH_NR) flush_tlb_pending(); out: put_cpu_var(tlb_batch); }
gpl-2.0
petalyaa/htc-evo3d-shooteru
drivers/net/wan/cycx_main.c
2691
9431
/* * cycx_main.c Cyclades Cyclom 2X WAN Link Driver. Main module. * * Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo * * Based on sdlamain.c by Gene Kozin <genek@compuserve.com> & * Jaspreet Singh <jaspreet@sangoma.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * ============================================================================ * Please look at the bitkeeper changelog (or any other scm tool that ends up * importing bitkeeper changelog or that replaces bitkeeper in the future as * main tool for linux development). * * 2001/05/09 acme Fix MODULE_DESC for debug, .bss nitpicks, * some cleanups * 2000/07/13 acme remove useless #ifdef MODULE and crap * #if KERNEL_VERSION > blah * 2000/07/06 acme __exit at cyclomx_cleanup * 2000/04/02 acme dprintk and cycx_debug * module_init/module_exit * 2000/01/21 acme rename cyclomx_open to cyclomx_mod_inc_use_count * and cyclomx_close to cyclomx_mod_dec_use_count * 2000/01/08 acme cleanup * 1999/11/06 acme cycx_down back to life (it needs to be * called to iounmap the dpmbase) * 1999/08/09 acme removed references to enable_tx_int * use spinlocks instead of cli/sti in * cyclomx_set_state * 1999/05/19 acme works directly linked into the kernel * init_waitqueue_head for 2.3.* kernel * 1999/05/18 acme major cleanup (polling not needed), etc * 1998/08/28 acme minor cleanup (ioctls for firmware deleted) * queue_task activated * 1998/08/08 acme Initial version. */ #include <linux/stddef.h> /* offsetof(), etc. */ #include <linux/errno.h> /* return codes */ #include <linux/string.h> /* inline memset(), etc. 
*/ #include <linux/slab.h> /* kmalloc(), kfree() */ #include <linux/kernel.h> /* printk(), and other useful stuff */ #include <linux/module.h> /* support for loadable modules */ #include <linux/ioport.h> /* request_region(), release_region() */ #include <linux/wanrouter.h> /* WAN router definitions */ #include <linux/cyclomx.h> /* cyclomx common user API definitions */ #include <linux/init.h> /* __init (when not using as a module) */ unsigned int cycx_debug; MODULE_AUTHOR("Arnaldo Carvalho de Melo"); MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver."); MODULE_LICENSE("GPL"); module_param(cycx_debug, int, 0); MODULE_PARM_DESC(cycx_debug, "cyclomx debug level"); /* Defines & Macros */ #define CYCX_DRV_VERSION 0 /* version number */ #define CYCX_DRV_RELEASE 11 /* release (minor version) number */ #define CYCX_MAX_CARDS 1 /* max number of adapters */ #define CONFIG_CYCX_CARDS 1 /* Function Prototypes */ /* WAN link driver entry points */ static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf); static int cycx_wan_shutdown(struct wan_device *wandev); /* Miscellaneous functions */ static irqreturn_t cycx_isr(int irq, void *dev_id); /* Global Data * Note: All data must be explicitly initialized!!! */ /* private data */ static const char cycx_drvname[] = "cyclomx"; static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver"; static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo " "<acme@conectiva.com.br>"; static int cycx_ncards = CONFIG_CYCX_CARDS; static struct cycx_device *cycx_card_array; /* adapter data space */ /* Kernel Loadable Module Entry Points */ /* * Module 'insert' entry point. * o print announcement * o allocate adapter data space * o initialize static data * o register all cards with WAN router * o calibrate Cyclom 2X shared memory access delay. * * Return: 0 Ok * < 0 error. 
* Context: process
 */
static int __init cycx_init(void)
{
	int cnt, err = -ENOMEM;

	printk(KERN_INFO "%s v%u.%u %s\n",
		cycx_fullname, CYCX_DRV_VERSION, CYCX_DRV_RELEASE,
		cycx_copyright);

	/* Verify number of cards and allocate adapter data space.
	 * Clamp cycx_ncards to [1, CYCX_MAX_CARDS] before sizing the array. */
	cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
	cycx_ncards = max_t(int, cycx_ncards, 1);
	cycx_card_array = kcalloc(cycx_ncards, sizeof(struct cycx_device),
				  GFP_KERNEL);
	if (!cycx_card_array)
		goto out;

	/* Register adapters with WAN router */
	for (cnt = 0; cnt < cycx_ncards; ++cnt) {
		struct cycx_device *card = &cycx_card_array[cnt];
		struct wan_device *wandev = &card->wandev;

		/* Device names are 1-based: "cyclomx1", "cyclomx2", ... */
		sprintf(card->devname, "%s%d", cycx_drvname, cnt + 1);
		wandev->magic    = ROUTER_MAGIC;
		wandev->name     = card->devname;
		wandev->private  = card;
		wandev->setup    = cycx_wan_setup;
		wandev->shutdown = cycx_wan_shutdown;
		err = register_wan_device(wandev);

		if (err) {
			printk(KERN_ERR "%s: %s registration failed with "
					"error %d!\n",
					cycx_drvname, card->devname, err);
			break;
		}
	}

	/* Fail only if no card at all registered; a partial registration
	 * is accepted and cycx_ncards is trimmed to the actual count. */
	err = -ENODEV;
	if (!cnt) {
		kfree(cycx_card_array);
		goto out;
	}
	err = 0;
	cycx_ncards = cnt;	/* adjust actual number of cards */
out:	return err;
}

/*
 * Module 'remove' entry point.
 * o unregister all adapters from the WAN router
 * o release all remaining system resources
 */
static void __exit cycx_exit(void)
{
	int i = 0;

	for (; i < cycx_ncards; ++i) {
		struct cycx_device *card = &cycx_card_array[i];

		unregister_wan_device(card->devname);
	}

	kfree(cycx_card_array);
}

/* WAN Device Driver Entry Points */
/*
 * Setup/configure WAN link driver.
 * o check adapter state
 * o make sure firmware is present in configuration
 * o allocate interrupt vector
 * o setup Cyclom 2X hardware
 * o call appropriate routine to perform protocol-specific initialization
 *
 * This function is called when router handles ROUTER_SETUP IOCTL. The
 * configuration structure is in kernel memory (including extended data, if
 * any).
 */
static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
{
	int rc = -EFAULT;
	struct cycx_device *card;
	int irq;

	/* Sanity checks */
	if (!wandev || !wandev->private || !conf)
		goto out;

	card = wandev->private;
	rc = -EBUSY;
	if (wandev->state != WAN_UNCONFIGURED)
		goto out;

	rc = -EINVAL;
	/* The firmware image is carried inside the configuration blob;
	 * without it the adapter cannot be brought up. */
	if (!conf->data_size || !conf->data) {
		printk(KERN_ERR "%s: firmware not found in configuration "
				"data!\n", wandev->name);
		goto out;
	}

	if (conf->irq <= 0) {
		printk(KERN_ERR "%s: can't configure without IRQ!\n",
				wandev->name);
		goto out;
	}

	/* Allocate IRQ */
	irq = conf->irq == 2 ? 9 : conf->irq;	/* IRQ2 -> IRQ9 */

	if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
		printk(KERN_ERR "%s: can't reserve IRQ %d!\n",
				wandev->name, irq);
		goto out;
	}

	/* Configure hardware, load firmware, etc. */
	memset(&card->hw, 0, sizeof(card->hw));
	card->hw.irq	 = irq;
	card->hw.dpmsize = CYCX_WINDOWSIZE;
	card->hw.fwid	 = CFID_X25_2X;
	spin_lock_init(&card->lock);
	init_waitqueue_head(&card->wait_stats);

	rc = cycx_setup(&card->hw, conf->data, conf->data_size, conf->maddr);
	if (rc)
		goto out_irq;

	/* Initialize WAN device data space */
	wandev->irq	= irq;
	wandev->dma	= wandev->ioport = 0;
	wandev->maddr	= (unsigned long)card->hw.dpmbase;
	wandev->msize	= card->hw.dpmsize;
	wandev->hw_opt[2] = 0;
	wandev->hw_opt[3] = card->hw.fwid;

	/* Protocol-specific initialization */
	switch (card->hw.fwid) {
#ifdef CONFIG_CYCLOMX_X25
	case CFID_X25_2X:
		rc = cycx_x25_wan_init(card, conf);
		break;
#endif
	default:
		printk(KERN_ERR "%s: this firmware is not supported!\n",
				wandev->name);
		rc = -EINVAL;
	}

	if (rc) {
		/* Protocol init failed after the board was brought up:
		 * shut the hardware down again before releasing the IRQ. */
		cycx_down(&card->hw);
		goto out_irq;
	}

	rc = 0;
out:
	return rc;

out_irq:
	free_irq(irq, card);
	goto out;
}

/*
 * Shut down WAN link driver.
 * o shut down adapter hardware
 * o release system resources.
 *
 * This function is called by the router when device is being unregistered or
 * when it handles ROUTER_DOWN IOCTL.
 */
static int cycx_wan_shutdown(struct wan_device *wandev)
{
	int ret = -EFAULT;
	struct cycx_device *card;

	/* sanity checks */
	if (!wandev || !wandev->private)
		goto out;

	ret = 0;
	/* Already down: nothing to release. */
	if (wandev->state == WAN_UNCONFIGURED)
		goto out;

	card = wandev->private;
	wandev->state = WAN_UNCONFIGURED;
	cycx_down(&card->hw);
	printk(KERN_INFO "%s: irq %d being freed!\n",
			wandev->name, wandev->irq);
	free_irq(wandev->irq, card);
out:
	return ret;
}

/* Miscellaneous */
/*
 * Cyclom 2X Interrupt Service Routine.
 * o acknowledge Cyclom 2X hardware interrupt.
 * o call protocol-specific interrupt service routine, if any.
 */
static irqreturn_t cycx_isr(int irq, void *dev_id)
{
	struct cycx_device *card = dev_id;

	if (card->wandev.state == WAN_UNCONFIGURED)
		goto out;

	/* NOTE(review): in_isr is only tested here, never set in this
	 * chunk — presumably managed by the protocol layer; confirm. */
	if (card->in_isr) {
		printk(KERN_WARNING "%s: interrupt re-entrancy on IRQ %d!\n",
				card->devname, card->wandev.irq);
		goto out;
	}

	/* Dispatch to the protocol-specific handler, if installed. */
	if (card->isr)
		card->isr(card);
	return IRQ_HANDLED;
out:
	return IRQ_NONE;
}

/* Set WAN device state.
 * Logs a transition message for CONNECTED/DISCONNECTED and stamps
 * state_tick with jiffies; protected by the per-card spinlock. */
void cycx_set_state(struct cycx_device *card, int state)
{
	unsigned long flags;
	char *string_state = NULL;

	spin_lock_irqsave(&card->lock, flags);

	if (card->wandev.state != state) {
		switch (state) {
		case WAN_CONNECTED:
			string_state = "connected!";
			break;
		case WAN_DISCONNECTED:
			string_state = "disconnected!";
			break;
		}
		/* NOTE(review): other states leave string_state NULL,
		 * which this printk would pass to %s — verify callers
		 * only ever use the two states above. */
		printk(KERN_INFO "%s: link %s\n", card->devname, string_state);
		card->wandev.state = state;
	}

	card->state_tick = jiffies;
	spin_unlock_irqrestore(&card->lock, flags);
}

module_init(cycx_init);
module_exit(cycx_exit);
gpl-2.0
dianlujitao/android_kernel_zte_msm8994
drivers/media/dvb-frontends/dib0090.c
2691
77881
/* * Linux-DVB Driver for DiBcom's DiB0090 base-band RF Tuner. * * Copyright (C) 2005-9 DiBcom (http://www.dibcom.fr/) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * * This code is more or less generated from another driver, please * excuse some codingstyle oddities. * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> #include "dvb_frontend.h" #include "dib0090.h" #include "dibx000_common.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); #define dprintk(args...) 
do { \ if (debug) { \ printk(KERN_DEBUG "DiB0090: "); \ printk(args); \ printk("\n"); \ } \ } while (0) #define CONFIG_SYS_DVBT #define CONFIG_SYS_ISDBT #define CONFIG_BAND_CBAND #define CONFIG_BAND_VHF #define CONFIG_BAND_UHF #define CONFIG_DIB0090_USE_PWM_AGC #define EN_LNA0 0x8000 #define EN_LNA1 0x4000 #define EN_LNA2 0x2000 #define EN_LNA3 0x1000 #define EN_MIX0 0x0800 #define EN_MIX1 0x0400 #define EN_MIX2 0x0200 #define EN_MIX3 0x0100 #define EN_IQADC 0x0040 #define EN_PLL 0x0020 #define EN_TX 0x0010 #define EN_BB 0x0008 #define EN_LO 0x0004 #define EN_BIAS 0x0001 #define EN_IQANA 0x0002 #define EN_DIGCLK 0x0080 /* not in the 0x24 reg, only in 0x1b */ #define EN_CRYSTAL 0x0002 #define EN_UHF 0x22E9 #define EN_VHF 0x44E9 #define EN_LBD 0x11E9 #define EN_SBD 0x44E9 #define EN_CAB 0x88E9 /* Calibration defines */ #define DC_CAL 0x1 #define WBD_CAL 0x2 #define TEMP_CAL 0x4 #define CAPTRIM_CAL 0x8 #define KROSUS_PLL_LOCKED 0x800 #define KROSUS 0x2 /* Use those defines to identify SOC version */ #define SOC 0x02 #define SOC_7090_P1G_11R1 0x82 #define SOC_7090_P1G_21R1 0x8a #define SOC_8090_P1G_11R1 0x86 #define SOC_8090_P1G_21R1 0x8e /* else use thos ones to check */ #define P1A_B 0x0 #define P1C 0x1 #define P1D_E_F 0x3 #define P1G 0x7 #define P1G_21R2 0xf #define MP001 0x1 /* Single 9090/8096 */ #define MP005 0x4 /* Single Sband */ #define MP008 0x6 /* Dual diversity VHF-UHF-LBAND */ #define MP009 0x7 /* Dual diversity 29098 CBAND-UHF-LBAND-SBAND */ #define pgm_read_word(w) (*w) struct dc_calibration; struct dib0090_tuning { u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */ u8 switch_trim; u8 lna_tune; u16 lna_bias; u16 v2i; u16 mix; u16 load; u16 tuner_enable; }; struct dib0090_pll { u32 max_freq; /* for every frequency less than or equal to that field: this information is correct */ u8 vco_band; u8 hfdiv_code; u8 hfdiv; u8 topresc; }; struct dib0090_identity { u8 version; u8 product; u8 p1g; u8 in_soc; }; 
/* Runtime state for the driver-controlled (non-firmware) tuner instance. */
struct dib0090_state {
	struct i2c_adapter *i2c;
	struct dvb_frontend *fe;
	const struct dib0090_config *config;

	u8 current_band;
	enum frontend_tune_state tune_state;
	u32 current_rf;

	u16 wbd_offset;
	s16 wbd_target;		/* in dB */

	s16 rf_gain_limit;	/* take-over-point: where to split between bb and rf gain */
	s16 current_gain;	/* keeps the currently programmed gain */
	u8 agc_step;		/* new binary search */

	u16 gain[2];		/* for channel monitoring */

	const u16 *rf_ramp;
	const u16 *bb_ramp;

	/* for the software AGC ramps */
	u16 bb_1_def;
	u16 rf_lt_def;
	u16 gain_reg[4];

	/* for the captrim/dc-offset search */
	s8 step;
	s16 adc_diff;
	s16 min_adc_diff;

	s8 captrim;
	s8 fcaptrim;

	const struct dc_calibration *dc;
	u16 bb6, bb7;

	const struct dib0090_tuning *current_tune_table_index;
	const struct dib0090_pll *current_pll_table_index;

	u8 tuner_is_tuned;
	u8 agc_freeze;

	struct dib0090_identity identity;

	u32 rf_request;
	u8 current_standard;

	u8 calibrate;
	u32 rest;
	u16 bias;
	s16 temperature;

	u8 wbd_calibration_gain;
	const struct dib0090_wbd_slope *current_wbd_table;
	u16 wbdmux;

	/* for the I2C transfer; guarded by i2c_buffer_lock */
	struct i2c_msg msg[2];
	u8 i2c_write_buffer[3];
	u8 i2c_read_buffer[2];
	struct mutex i2c_buffer_lock;
};

/* Reduced state used when the tuner is driven by on-chip firmware. */
struct dib0090_fw_state {
	struct i2c_adapter *i2c;
	struct dvb_frontend *fe;
	struct dib0090_identity identity;
	const struct dib0090_config *config;

	/* for the I2C transfer; guarded by i2c_buffer_lock */
	struct i2c_msg msg;
	u8 i2c_write_buffer[2];
	u8 i2c_read_buffer[2];
	struct mutex i2c_buffer_lock;
};

/* Read a 16-bit big-endian register over I2C.
 * Returns 0 on lock or transfer failure (0 is indistinguishable from a
 * legitimate register value). */
static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
{
	u16 ret;

	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
		dprintk("could not acquire lock");
		return 0;
	}

	state->i2c_write_buffer[0] = reg;

	/* write register address, then read back two data bytes */
	memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
	state->msg[0].addr = state->config->i2c_address;
	state->msg[0].flags = 0;
	state->msg[0].buf = state->i2c_write_buffer;
	state->msg[0].len = 1;
	state->msg[1].addr = state->config->i2c_address;
	state->msg[1].flags = I2C_M_RD;
	state->msg[1].buf = state->i2c_read_buffer;
	state->msg[1].len = 2;

	if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
		printk(KERN_WARNING "DiB0090 I2C read failed\n");
		ret = 0;
	} else
		ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];

	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/* Write a 16-bit big-endian value to a register over I2C.
 * Returns 0 on success, -EINVAL if the buffer lock was interrupted,
 * -EREMOTEIO on transfer failure. */
static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
{
	int ret;

	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
		dprintk("could not acquire lock");
		return -EINVAL;
	}

	/* payload: register address then value, MSB first */
	state->i2c_write_buffer[0] = reg & 0xff;
	state->i2c_write_buffer[1] = val >> 8;
	state->i2c_write_buffer[2] = val & 0xff;

	memset(state->msg, 0, sizeof(struct i2c_msg));
	state->msg[0].addr = state->config->i2c_address;
	state->msg[0].flags = 0;
	state->msg[0].buf = state->i2c_write_buffer;
	state->msg[0].len = 3;

	if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
		printk(KERN_WARNING "DiB0090 I2C write failed\n");
		ret = -EREMOTEIO;
	} else
		ret = 0;

	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/* Firmware-mode register read: the register number is placed in the I2C
 * address field (the firmware maps registers onto addresses).
 * Returns 0 on lock or transfer failure. */
static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
{
	u16 ret;

	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
		dprintk("could not acquire lock");
		return 0;
	}

	state->i2c_write_buffer[0] = reg;

	memset(&state->msg, 0, sizeof(struct i2c_msg));
	state->msg.addr = reg;
	state->msg.flags = I2C_M_RD;
	state->msg.buf = state->i2c_read_buffer;
	state->msg.len = 2;

	if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
		printk(KERN_WARNING "DiB0090 I2C read failed\n");
		ret = 0;
	} else
		ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];

	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/* Firmware-mode register write; register number doubles as I2C address.
 * Returns 0 on success, -EINVAL on interrupted lock, -EREMOTEIO on
 * transfer failure. */
static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
{
	int ret;

	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
		dprintk("could not acquire lock");
		return -EINVAL;
	}

	state->i2c_write_buffer[0] = val >> 8;
	state->i2c_write_buffer[1] = val & 0xff;

	memset(&state->msg, 0, sizeof(struct i2c_msg));
	state->msg.addr = reg;
	state->msg.flags = 0;
	state->msg.buf = state->i2c_write_buffer;
	state->msg.len = 2;

	if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
		printk(KERN_WARNING "DiB0090 I2C write failed\n");
		ret = -EREMOTEIO;
	} else
		ret = 0;

	mutex_unlock(&state->i2c_buffer_lock);
	return ret;
}

/* Pulse the reset line (with optional wake first); relies on cfg/fe being
 * in scope at the expansion site. */
#define HARD_RESET(state) do { if (cfg->reset) { if (cfg->sleep) cfg->sleep(fe, 0); msleep(10); cfg->reset(fe, 1); msleep(10); cfg->reset(fe, 0); msleep(10); } } while (0)

#define ADC_TARGET -220
#define GAIN_ALPHA 5
#define WBD_ALPHA 6
#define LPF 100

/* Write c consecutive registers starting at r from array b (c must be > 0). */
static void dib0090_write_regs(struct dib0090_state *state, u8 r, const u16 * b, u8 c)
{
	do {
		dib0090_write_reg(state, r++, *b++);
	} while (--c);
}

/* Identify the tuner silicon from register 0x1a and fill state->identity.
 * Returns 0 if a supported Krosus part is found, -EIO otherwise. */
static int dib0090_identify(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	u16 v;
	struct dib0090_identity *identity = &state->identity;

	v = dib0090_read_reg(state, 0x1a);

	identity->p1g = 0;
	identity->in_soc = 0;

	dprintk("Tuner identification (Version = 0x%04x)", v);

	/* without PLL lock info */
	v &= ~KROSUS_PLL_LOCKED;

	identity->version = v & 0xff;
	identity->product = (v >> 8) & 0xf;

	if (identity->product != KROSUS)
		goto identification_error;

	if ((identity->version & 0x3) == SOC) {
		identity->in_soc = 1;
		switch (identity->version) {
		case SOC_8090_P1G_11R1:
			dprintk("SOC 8090 P1-G11R1 Has been detected");
			identity->p1g = 1;
			break;
		case SOC_8090_P1G_21R1:
			dprintk("SOC 8090 P1-G21R1 Has been detected");
			identity->p1g = 1;
			break;
		case SOC_7090_P1G_11R1:
			dprintk("SOC 7090 P1-G11R1 Has been detected");
			identity->p1g = 1;
			break;
		case SOC_7090_P1G_21R1:
			dprintk("SOC 7090 P1-G21R1 Has been detected");
			identity->p1g = 1;
			break;
		default:
			goto identification_error;
		}
	} else {
		/* discrete part: product family in bits 5-7 ... */
		switch ((identity->version >> 5) & 0x7) {
		case MP001:
			dprintk("MP001 : 9090/8096");
			break;
		case MP005:
			dprintk("MP005 : Single Sband");
			break;
		case MP008:
			dprintk("MP008 : diversity VHF-UHF-LBAND");
			break;
		case MP009:
			dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
			break;
		default:
			goto identification_error;
		}

		/* ... and silicon revision in bits 0-4 */
		switch (identity->version & 0x1f) {
		case P1G_21R2:
			dprintk("P1G_21R2 detected");
			identity->p1g = 1;
			break;
		case P1G:
			dprintk("P1G detected");
			identity->p1g = 1;
			break;
		case P1D_E_F:
			dprintk("P1D/E/F detected");
			break;
		case P1C:
			dprintk("P1C detected");
			break;
		case P1A_B:
			dprintk("P1-A/B detected: driver is deactivated - not available");
			goto identification_error;
			break;
		default:
			goto identification_error;
		}
	}

	return 0;

identification_error:
	return -EIO;
}

/* Firmware-mode counterpart of dib0090_identify(); same decoding of
 * register 0x1a via the firmware I2C path. Returns 0 or -EIO. */
static int dib0090_fw_identify(struct dvb_frontend *fe)
{
	struct dib0090_fw_state *state = fe->tuner_priv;
	struct dib0090_identity *identity = &state->identity;
	u16 v = dib0090_fw_read_reg(state, 0x1a);

	identity->p1g = 0;
	identity->in_soc = 0;

	dprintk("FE: Tuner identification (Version = 0x%04x)", v);

	/* without PLL lock info */
	v &= ~KROSUS_PLL_LOCKED;

	identity->version = v & 0xff;
	identity->product = (v >> 8) & 0xf;

	if (identity->product != KROSUS)
		goto identification_error;

	if ((identity->version & 0x3) == SOC) {
		identity->in_soc = 1;
		switch (identity->version) {
		case SOC_8090_P1G_11R1:
			dprintk("SOC 8090 P1-G11R1 Has been detected");
			identity->p1g = 1;
			break;
		case SOC_8090_P1G_21R1:
			dprintk("SOC 8090 P1-G21R1 Has been detected");
			identity->p1g = 1;
			break;
		case SOC_7090_P1G_11R1:
			dprintk("SOC 7090 P1-G11R1 Has been detected");
			identity->p1g = 1;
			break;
		case SOC_7090_P1G_21R1:
			dprintk("SOC 7090 P1-G21R1 Has been detected");
			identity->p1g = 1;
			break;
		default:
			goto identification_error;
		}
	} else {
		switch ((identity->version >> 5) & 0x7) {
		case MP001:
			dprintk("MP001 : 9090/8096");
			break;
		case MP005:
			dprintk("MP005 : Single Sband");
			break;
		case MP008:
			dprintk("MP008 : diversity VHF-UHF-LBAND");
			break;
		case MP009:
			dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
			break;
		default:
			goto identification_error;
		}

		switch (identity->version & 0x1f) {
		case P1G_21R2:
			dprintk("P1G_21R2 detected");
			identity->p1g = 1;
			break;
		case P1G:
			dprintk("P1G detected");
			identity->p1g = 1;
			break;
		case P1D_E_F:
			dprintk("P1D/E/F detected");
			break;
		case P1C:
			dprintk("P1C detected");
			break;
		case P1A_B:
			dprintk("P1-A/B detected: driver is deactivated - not available");
			goto identification_error;
			break;
		default:
			goto identification_error;
		}
	}

	return 0;

identification_error:
	return -EIO;
}

/* Reset the digital part: hard-reset, re-enable clocks, and reprogram the
 * PLL (register 0x21) if its current configuration differs from cfg.
 * Returns early for SoC parts (only 0x24 is written). */
static void dib0090_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg)
{
	struct dib0090_state *state = fe->tuner_priv;
	u16 PllCfg, i, v;

	HARD_RESET(state);

	dib0090_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL);
	if (cfg->in_soc)
		return;

	dib0090_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL);	/* PLL, DIG_CLK and CRYSTAL remain */
	/* adcClkOutRatio=8->7, release reset */
	dib0090_write_reg(state, 0x20, ((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (0 << 4) | 0);
	if (cfg->clkoutdrive != 0)
		dib0090_write_reg(state, 0x23, (0 << 15) | ((!cfg->analog_output) << 14) | (2 << 10) | (1 << 9) | (0 << 8)
				| (cfg->clkoutdrive << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0));
	else
		dib0090_write_reg(state, 0x23, (0 << 15) | ((!cfg->analog_output) << 14) | (2 << 10) | (1 << 9) | (0 << 8)
				| (7 << 5) | (cfg->clkouttobamse << 4) | (0 << 2) | (0));

	/* Read Pll current config * */
	PllCfg = dib0090_read_reg(state, 0x21);

	/** Reconfigure PLL if current setting is different from default setting **/
	if ((PllCfg & 0x1FFF) != ((cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv)) && (!cfg->in_soc)
			&& !cfg->io.pll_bypass) {

		/* Set Bypass mode */
		PllCfg |= (1 << 15);
		dib0090_write_reg(state, 0x21, PllCfg);

		/* Set Reset Pll */
		PllCfg &= ~(1 << 13);
		dib0090_write_reg(state, 0x21, PllCfg);

		/*** Set new Pll configuration in bypass and reset state ***/
		PllCfg = (1 << 15) | (0 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv);
		dib0090_write_reg(state, 0x21, PllCfg);

		/* Remove Reset Pll */
		PllCfg |= (1 << 13);
		dib0090_write_reg(state, 0x21, PllCfg);

		/*** Wait for PLL lock (poll bit 11 of 0x1a, up to 100 tries) ***/
		i = 100;
		do {
			v = !!(dib0090_read_reg(state, 0x1a) & 0x800);
			if (v)
				break;
		} while (--i);

		if (i == 0) {
			dprintk("Pll: Unable to lock Pll");
			return;
		}

		/* Finally Remove Bypass mode */
		PllCfg &= ~(1 << 15);
		dib0090_write_reg(state, 0x21, PllCfg);
	}

	if (cfg->io.pll_bypass) {
		PllCfg |= (cfg->io.pll_bypass << 15);
		dib0090_write_reg(state, 0x21, PllCfg);
	}
}

/* Firmware-mode digital reset; same PLL bring-up sequence as
 * dib0090_reset_digital() through the firmware I2C path, then identifies
 * the part. Returns 0 on success or a negative error (-EIO on PLL lock
 * failure or identification failure). */
static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib0090_config *cfg)
{
	struct dib0090_fw_state *state = fe->tuner_priv;
	u16 PllCfg;
	u16 v;
	int i;

	dprintk("fw reset digital");
	HARD_RESET(state);

	dib0090_fw_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL);
	dib0090_fw_write_reg(state, 0x1b, EN_DIGCLK | EN_PLL | EN_CRYSTAL);	/* PLL, DIG_CLK and CRYSTAL remain */

	dib0090_fw_write_reg(state, 0x20,
			((cfg->io.adc_clock_ratio - 1) << 11) | (0 << 10) | (1 << 9) | (1 << 8) | (cfg->data_tx_drv << 4) | cfg->ls_cfg_pad_drv);

	v = (0 << 15) | ((!cfg->analog_output) << 14) | (1 << 9) | (0 << 8) | (cfg->clkouttobamse << 4) | (0 << 2) | (0);
	if (cfg->clkoutdrive != 0)
		v |= cfg->clkoutdrive << 5;
	else
		v |= 7 << 5;

	v |= 2 << 10;
	dib0090_fw_write_reg(state, 0x23, v);

	/* Read Pll current config * */
	PllCfg = dib0090_fw_read_reg(state, 0x21);

	/** Reconfigure PLL if current setting is different from default setting **/
	if ((PllCfg & 0x1FFF) != ((cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv)) && !cfg->io.pll_bypass) {

		/* Set Bypass mode */
		PllCfg |= (1 << 15);
		dib0090_fw_write_reg(state, 0x21, PllCfg);

		/* Set Reset Pll */
		PllCfg &= ~(1 << 13);
		dib0090_fw_write_reg(state, 0x21, PllCfg);

		/*** Set new Pll configuration in bypass and reset state ***/
		PllCfg = (1 << 15) | (0 << 13) | (cfg->io.pll_range << 12) | (cfg->io.pll_loopdiv << 6) | (cfg->io.pll_prediv);
		dib0090_fw_write_reg(state, 0x21, PllCfg);

		/* Remove Reset Pll */
		PllCfg |= (1 << 13);
		dib0090_fw_write_reg(state, 0x21, PllCfg);

		/*** Wait for PLL lock ***/
		i = 100;
		do {
			v = !!(dib0090_fw_read_reg(state, 0x1a) & 0x800);
			if (v)
				break;
		} while (--i);

		if (i == 0) {
			dprintk("Pll: Unable to lock Pll");
			return -EIO;
		}

		/* Finally Remove Bypass mode */
		PllCfg &= ~(1 << 15);
		dib0090_fw_write_reg(state, 0x21, PllCfg);
	}

	if (cfg->io.pll_bypass) {
		PllCfg |= (cfg->io.pll_bypass << 15);
		dib0090_fw_write_reg(state, 0x21, PllCfg);
	}

	return dib0090_fw_identify(fe);
}

/* Wake the tuner via the board sleep callback and re-enable dataTX. */
static int dib0090_wakeup(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	if (state->config->sleep)
		state->config->sleep(fe, 0);

	/* enable dataTX in case we have been restarted in the wrong moment */
	dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14));
	return 0;
}

/* Put the tuner to sleep via the board sleep callback (if provided). */
static int dib0090_sleep(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	if (state->config->sleep)
		state->config->sleep(fe, 1);
	return 0;
}

/* Select DC-compensation loop speed: fast (reg 0x04 = 0) or slow (= 1). */
void dib0090_dcc_freq(struct dvb_frontend *fe, u8 fast)
{
	struct dib0090_state *state = fe->tuner_priv;
	if (fast)
		dib0090_write_reg(state, 0x04, 0);
	else
		dib0090_write_reg(state, 0x04, 1);
}
EXPORT_SYMBOL(dib0090_dcc_freq);

/* PWM AGC ramp tables. Layout (common to all tables below):
 * [0] max gain in 10th of dB, [1] slope, [2] ramp max, then pairs of
 * (slope << shift) | start values per amplifier stage. */
static const u16 bb_ramp_pwm_normal_socs[] = {
	550,			/* max BB gain in 10th of dB */
	(1 << 9) | 8,		/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> BB_RAMP2 */
	440,
	(4 << 9) | 0,		/* BB_RAMP3 = 26dB */
	(0 << 9) | 208,		/* BB_RAMP4 */
	(4 << 9) | 208,		/* BB_RAMP5 = 29dB */
	(0 << 9) | 440,		/* BB_RAMP6 */
};

static const u16 rf_ramp_pwm_cband_7090p[] = {
	280,			/* max RF gain in 10th of dB */
	18,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	504,			/* ramp_max = maximum X used on the ramp */
	(29 << 10) | 364,	/* RF_RAMP5, LNA 1 = 8dB */
	(0 << 10) | 504,	/* RF_RAMP6, LNA 1 */
	(60 << 10) | 228,	/* RF_RAMP7, LNA 2 = 7.7dB */
	(0 << 10) | 364,	/* RF_RAMP8, LNA 2 */
	(34 << 10) | 109,	/* GAIN_4_1, LNA 3 = 6.8dB */
	(0 << 10) | 228,	/* GAIN_4_2, LNA 3 */
	(37 << 10) | 0,		/* RF_RAMP3, LNA 4 = 6.2dB */
	(0 << 10) | 109,	/* RF_RAMP4, LNA 4 */
};

static const u16 rf_ramp_pwm_cband_7090e_sensitivity[] = {
	186,			/* max RF gain in 10th of dB */
	40,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	746,			/* ramp_max = maximum X used on the ramp */
	(10 << 10) | 345,	/* RF_RAMP5, LNA 1 = 10dB */
	(0 << 10) | 746,	/* RF_RAMP6, LNA 1 */
	(0 << 10) | 0,		/* RF_RAMP7, LNA 2 = 0 dB */
	(0 << 10) | 0,		/* RF_RAMP8, LNA 2 */
	(28 << 10) | 200,	/* GAIN_4_1, LNA 3 = 6.8dB */	/* 3.61 dB */
	(0 << 10) | 345,	/* GAIN_4_2, LNA 3 */
	(20 << 10) | 0,		/* RF_RAMP3, LNA 4 = 6.2dB */	/* 4.96 dB */
	(0 << 10) | 200,	/* RF_RAMP4, LNA 4 */
};

static const u16 rf_ramp_pwm_cband_7090e_aci[] = {
	86,			/* max RF gain in 10th of dB */
	40,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	345,			/* ramp_max = maximum X used on the ramp */
	(0 << 10) | 0,		/* RF_RAMP5, LNA 1 = 8dB */	/* 7.47 dB */
	(0 << 10) | 0,		/* RF_RAMP6, LNA 1 */
	(0 << 10) | 0,		/* RF_RAMP7, LNA 2 = 0 dB */
	(0 << 10) | 0,		/* RF_RAMP8, LNA 2 */
	(28 << 10) | 200,	/* GAIN_4_1, LNA 3 = 6.8dB */	/* 3.61 dB */
	(0 << 10) | 345,	/* GAIN_4_2, LNA 3 */
	(20 << 10) | 0,		/* RF_RAMP3, LNA 4 = 6.2dB */	/* 4.96 dB */
	(0 << 10) | 200,	/* RF_RAMP4, LNA 4 */
};

static const u16 rf_ramp_pwm_cband_8090[] = {
	345,			/* max RF gain in 10th of dB */
	29,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	1000,			/* ramp_max = maximum X used on the ramp */
	(35 << 10) | 772,	/* RF_RAMP3, LNA 1 = 8dB */
	(0 << 10) | 1000,	/* RF_RAMP4, LNA 1 */
	(58 << 10) | 496,	/* RF_RAMP5, LNA 2 = 9.5dB */
	(0 << 10) | 772,	/* RF_RAMP6, LNA 2 */
	(27 << 10) | 200,	/* RF_RAMP7, LNA 3 = 10.5dB */
	(0 << 10) | 496,	/* RF_RAMP8, LNA 3 */
	(40 << 10) | 0,		/* GAIN_4_1, LNA 4 = 7dB */
	(0 << 10) | 200,	/* GAIN_4_2, LNA 4 */
};

static const u16 rf_ramp_pwm_uhf_7090[] = {
	407,			/* max RF gain in 10th of dB */
	13,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	529,			/* ramp_max = maximum X used on the ramp */
	(23 << 10) | 0,		/* RF_RAMP3, LNA 1 = 14.7dB */
	(0 << 10) | 176,	/* RF_RAMP4, LNA 1 */
	(63 << 10) | 400,	/* RF_RAMP5, LNA 2 = 8dB */
	(0 << 10) | 529,	/* RF_RAMP6, LNA 2 */
	(48 << 10) | 316,	/* RF_RAMP7, LNA 3 = 6.8dB */
	(0 << 10) | 400,	/* RF_RAMP8, LNA 3 */
	(29 << 10) | 176,	/* GAIN_4_1, LNA 4 = 11.5dB */
	(0 << 10) | 316,	/* GAIN_4_2, LNA 4 */
};

static const u16 rf_ramp_pwm_uhf_8090[] = {
	388,			/* max RF gain in 10th of dB */
	26,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	1008,			/* ramp_max = maximum X used on the ramp */
	(11 << 10) | 0,		/* RF_RAMP3, LNA 1 = 14.7dB */
	(0 << 10) | 369,	/* RF_RAMP4, LNA 1 */
	(41 << 10) | 809,	/* RF_RAMP5, LNA 2 = 8dB */
	(0 << 10) | 1008,	/* RF_RAMP6, LNA 2 */
	(27 << 10) | 659,	/* RF_RAMP7, LNA 3 = 6dB */
	(0 << 10) | 809,	/* RF_RAMP8, LNA 3 */
	(14 << 10) | 369,	/* GAIN_4_1, LNA 4 = 11.5dB */
	(0 << 10) | 659,	/* GAIN_4_2, LNA 4 */
};

/* GENERAL PWM ramp definition for all other Krosus */
static const u16 bb_ramp_pwm_normal[] = {
	500,			/* max BB gain in 10th of dB */
	8,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> BB_RAMP2 */
	400,
	(2 << 9) | 0,		/* BB_RAMP3 = 21dB */
	(0 << 9) | 168,		/* BB_RAMP4 */
	(2 << 9) | 168,		/* BB_RAMP5 = 29dB */
	(0 << 9) | 400,		/* BB_RAMP6 */
};

static const u16 bb_ramp_pwm_boost[] = {
	550,			/* max BB gain in 10th of dB */
	8,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> BB_RAMP2 */
	440,
	(2 << 9) | 0,		/* BB_RAMP3 = 26dB */
	(0 << 9) | 208,		/* BB_RAMP4 */
	(2 << 9) | 208,		/* BB_RAMP5 = 29dB */
	(0 << 9) | 440,		/* BB_RAMP6 */
};

static const u16 rf_ramp_pwm_cband[] = {
	314,			/* max RF gain in 10th of dB */
	33,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	1023,			/* ramp_max = maximum X used on the ramp */
	(8 << 10) | 743,	/* RF_RAMP3, LNA 1 = 0dB */
	(0 << 10) | 1023,	/* RF_RAMP4, LNA 1 */
	(15 << 10) | 469,	/* RF_RAMP5, LNA 2 = 0dB */
	(0 << 10) | 742,	/* RF_RAMP6, LNA 2 */
	(9 << 10) | 234,	/* RF_RAMP7, LNA 3 = 0dB */
	(0 << 10) | 468,	/* RF_RAMP8, LNA 3 */
	(9 << 10) | 0,		/* GAIN_4_1, LNA 4 = 0dB */
	(0 << 10) | 233,	/* GAIN_4_2, LNA 4 */
};

static const u16 rf_ramp_pwm_vhf[] = {
	398,			/* max RF gain in 10th of dB */
	24,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	954,			/* ramp_max = maximum X used on the ramp */
	(7 << 10) | 0,		/* RF_RAMP3, LNA 1 = 13.2dB */
	(0 << 10) | 290,	/* RF_RAMP4, LNA 1 */
	(16 << 10) | 699,	/* RF_RAMP5, LNA 2 = 10.5dB */
	(0 << 10) | 954,	/* RF_RAMP6, LNA 2 */
	(17 << 10) | 580,	/* RF_RAMP7, LNA 3 = 5dB */
	(0 << 10) | 699,	/* RF_RAMP8, LNA 3 */
	(7 << 10) | 290,	/* GAIN_4_1, LNA 4 = 12.5dB */
	(0 << 10) | 580,	/* GAIN_4_2, LNA 4 */
};

static const u16 rf_ramp_pwm_uhf[] = {
	398,			/* max RF gain in 10th of dB */
	24,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	954,			/* ramp_max = maximum X used on the ramp */
	(7 << 10) | 0,		/* RF_RAMP3, LNA 1 = 13.2dB */
	(0 << 10) | 290,	/* RF_RAMP4, LNA 1 */
	(16 << 10) | 699,	/* RF_RAMP5, LNA 2 = 10.5dB */
	(0 << 10) | 954,	/* RF_RAMP6, LNA 2 */
	(17 << 10) | 580,	/* RF_RAMP7, LNA 3 = 5dB */
	(0 << 10) | 699,	/* RF_RAMP8, LNA 3 */
	(7 << 10) | 290,	/* GAIN_4_1, LNA 4 = 12.5dB */
	(0 << 10) | 580,	/* GAIN_4_2, LNA 4 */
};

static const u16 rf_ramp_pwm_sband[] = {
	253,			/* max RF gain in 10th of dB */
	38,			/* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
	961,
	(4 << 10) | 0,		/* RF_RAMP3, LNA 1 = 14.1dB */
	(0 << 10) | 508,	/* RF_RAMP4, LNA 1 */
	(9 << 10) | 508,	/* RF_RAMP5, LNA 2 = 11.2dB */
	(0 << 10) | 961,	/* RF_RAMP6, LNA 2 */
	(0 << 10) | 0,		/* RF_RAMP7, LNA 3 = 0dB */
	(0 << 10) | 0,		/* RF_RAMP8, LNA 3 */
	(0 << 10) | 0,		/* GAIN_4_1, LNA 4 = 0dB */
	(0 << 10) | 0,		/* GAIN_4_2, LNA 4 */
};

/* One segment of a piecewise-linear mapping: over 'range' input units the
 * output increases by 'slope' units. */
struct slope {
	s16 range;
	s16 slope;
};
/* Map val through a piecewise-linear curve described by 'slopes' (num
 * segments), accumulating each segment's proportional contribution. */
static u16 slopes_to_scale(const struct slope *slopes, u8 num, s16 val)
{
	u8 i;
	u16 rest;
	u16 ret = 0;
	for (i = 0; i < num; i++) {
		if (val > slopes[i].range)
			rest = slopes[i].range;
		else
			rest = val;
		ret += (rest * slopes[i].slope) / slopes[i].range;
		val -= rest;
	}
	return ret;
}
static const struct slope dib0090_wbd_slopes[3] = { {66, 120}, /* -64,-52: offset - 65 */ {600, 170}, /* -52,-35: 65 - 665 */ {170, 250}, /* -45,-10: 665 - 835 */ }; static s16 dib0090_wbd_to_db(struct dib0090_state *state, u16 wbd) { wbd &= 0x3ff; if (wbd < state->wbd_offset) wbd = 0; else wbd -= state->wbd_offset; /* -64dB is the floor */ return -640 + (s16) slopes_to_scale(dib0090_wbd_slopes, ARRAY_SIZE(dib0090_wbd_slopes), wbd); } static void dib0090_wbd_target(struct dib0090_state *state, u32 rf) { u16 offset = 250; /* TODO : DAB digital N+/-1 interferer perfs : offset = 10 */ if (state->current_band == BAND_VHF) offset = 650; #ifndef FIRMWARE_FIREFLY if (state->current_band == BAND_VHF) offset = state->config->wbd_vhf_offset; if (state->current_band == BAND_CBAND) offset = state->config->wbd_cband_offset; #endif state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + offset); dprintk("wbd-target: %d dB", (u32) state->wbd_target); } static const int gain_reg_addr[4] = { 0x08, 0x0a, 0x0f, 0x01 }; static void dib0090_gain_apply(struct dib0090_state *state, s16 gain_delta, s16 top_delta, u8 force) { u16 rf, bb, ref; u16 i, v, gain_reg[4] = { 0 }, gain; const u16 *g; if (top_delta < -511) top_delta = -511; if (top_delta > 511) top_delta = 511; if (force) { top_delta *= (1 << WBD_ALPHA); gain_delta *= (1 << GAIN_ALPHA); } if (top_delta >= ((s16) (state->rf_ramp[0] << WBD_ALPHA) - state->rf_gain_limit)) /* overflow */ state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA; else state->rf_gain_limit += top_delta; if (state->rf_gain_limit < 0) /*underflow */ state->rf_gain_limit = 0; /* use gain as a temporary variable and correct current_gain */ gain = ((state->rf_gain_limit >> WBD_ALPHA) + state->bb_ramp[0]) << GAIN_ALPHA; if (gain_delta >= ((s16) gain - state->current_gain)) /* overflow */ state->current_gain = gain; else state->current_gain += gain_delta; /* cannot be less than 0 (only if gain_delta is less than 0 we can have current_gain < 0) */ if 
(state->current_gain < 0) state->current_gain = 0; /* now split total gain to rf and bb gain */ gain = state->current_gain >> GAIN_ALPHA; /* requested gain is bigger than rf gain limit - ACI/WBD adjustment */ if (gain > (state->rf_gain_limit >> WBD_ALPHA)) { rf = state->rf_gain_limit >> WBD_ALPHA; bb = gain - rf; if (bb > state->bb_ramp[0]) bb = state->bb_ramp[0]; } else { /* high signal level -> all gains put on RF */ rf = gain; bb = 0; } state->gain[0] = rf; state->gain[1] = bb; /* software ramp */ /* Start with RF gains */ g = state->rf_ramp + 1; /* point on RF LNA1 max gain */ ref = rf; for (i = 0; i < 7; i++) { /* Go over all amplifiers => 5RF amps + 2 BB amps = 7 amps */ if (g[0] == 0 || ref < (g[1] - g[0])) /* if total gain of the current amp is null or this amp is not concerned because it starts to work from an higher gain value */ v = 0; /* force the gain to write for the current amp to be null */ else if (ref >= g[1]) /* Gain to set is higher than the high working point of this amp */ v = g[2]; /* force this amp to be full gain */ else /* compute the value to set to this amp because we are somewhere in his range */ v = ((ref - (g[1] - g[0])) * g[2]) / g[0]; if (i == 0) /* LNA 1 reg mapping */ gain_reg[0] = v; else if (i == 1) /* LNA 2 reg mapping */ gain_reg[0] |= v << 7; else if (i == 2) /* LNA 3 reg mapping */ gain_reg[1] = v; else if (i == 3) /* LNA 4 reg mapping */ gain_reg[1] |= v << 7; else if (i == 4) /* CBAND LNA reg mapping */ gain_reg[2] = v | state->rf_lt_def; else if (i == 5) /* BB gain 1 reg mapping */ gain_reg[3] = v << 3; else if (i == 6) /* BB gain 2 reg mapping */ gain_reg[3] |= v << 8; g += 3; /* go to next gain bloc */ /* When RF is finished, start with BB */ if (i == 4) { g = state->bb_ramp + 1; /* point on BB gain 1 max gain */ ref = bb; } } gain_reg[3] |= state->bb_1_def; gain_reg[3] |= ((bb % 10) * 100) / 125; #ifdef DEBUG_AGC dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x 
gain_reg[0]=%04x", rf, bb, rf + bb, gain_reg[0], gain_reg[1], gain_reg[2], gain_reg[3]); #endif /* Write the amplifier regs */ for (i = 0; i < 4; i++) { v = gain_reg[i]; if (force || state->gain_reg[i] != v) { state->gain_reg[i] = v; dib0090_write_reg(state, gain_reg_addr[i], v); } } } static void dib0090_set_boost(struct dib0090_state *state, int onoff) { state->bb_1_def &= 0xdfff; state->bb_1_def |= onoff << 13; } static void dib0090_set_rframp(struct dib0090_state *state, const u16 * cfg) { state->rf_ramp = cfg; } static void dib0090_set_rframp_pwm(struct dib0090_state *state, const u16 * cfg) { state->rf_ramp = cfg; dib0090_write_reg(state, 0x2a, 0xffff); dprintk("total RF gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x2a)); dib0090_write_regs(state, 0x2c, cfg + 3, 6); dib0090_write_regs(state, 0x3e, cfg + 9, 2); } static void dib0090_set_bbramp(struct dib0090_state *state, const u16 * cfg) { state->bb_ramp = cfg; dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher that 50dB */ } static void dib0090_set_bbramp_pwm(struct dib0090_state *state, const u16 * cfg) { state->bb_ramp = cfg; dib0090_set_boost(state, cfg[0] > 500); /* we want the boost if the gain is higher that 50dB */ dib0090_write_reg(state, 0x33, 0xffff); dprintk("total BB gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x33)); dib0090_write_regs(state, 0x35, cfg + 3, 4); } void dib0090_pwm_gain_reset(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; u16 *bb_ramp = (u16 *)&bb_ramp_pwm_normal; /* default baseband config */ u16 *rf_ramp = NULL; u8 en_pwm_rf_mux = 1; /* reset the AGC */ if (state->config->use_pwm_agc) { if (state->current_band == BAND_CBAND) { if (state->identity.in_soc) { bb_ramp = (u16 *)&bb_ramp_pwm_normal_socs; if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1) rf_ramp = (u16 *)&rf_ramp_pwm_cband_8090; else if (state->identity.version == SOC_7090_P1G_11R1 
|| state->identity.version == SOC_7090_P1G_21R1) {
					/* 7090E: keep a caller-selected ramp if one is already set */
					if (state->config->is_dib7090e) {
						if (state->rf_ramp == NULL)
							rf_ramp = (u16 *)&rf_ramp_pwm_cband_7090e_sensitivity;
						else
							rf_ramp = (u16 *)state->rf_ramp;
					} else
						rf_ramp = (u16 *)&rf_ramp_pwm_cband_7090p;
				}
			} else
				rf_ramp = (u16 *)&rf_ramp_pwm_cband;
		} else if (state->current_band == BAND_VHF) {
			if (state->identity.in_soc) {
				bb_ramp = (u16 *)&bb_ramp_pwm_normal_socs;
				/* rf_ramp = &rf_ramp_pwm_vhf_socs; */ /* TODO */
			} else
				rf_ramp = (u16 *)&rf_ramp_pwm_vhf;
		} else if (state->current_band == BAND_UHF) {
			if (state->identity.in_soc) {
				bb_ramp = (u16 *)&bb_ramp_pwm_normal_socs;
				if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
					rf_ramp = (u16 *)&rf_ramp_pwm_uhf_8090;
				else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1)
					rf_ramp = (u16 *)&rf_ramp_pwm_uhf_7090;
			} else
				rf_ramp = (u16 *)&rf_ramp_pwm_uhf;
		}
		if (rf_ramp)
			dib0090_set_rframp_pwm(state, rf_ramp);
		dib0090_set_bbramp_pwm(state, bb_ramp);

		/* activate the ramp generator using PWM control */
		dprintk("ramp RF gain = %d BAND = %s version = %d", state->rf_ramp[0], (state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND", state->identity.version & 0x1f);

		/* old CBAND parts cannot use the PWM mux: drive gain regs directly */
		if ((state->rf_ramp[0] == 0) || (state->current_band == BAND_CBAND && (state->identity.version & 0x1f) <= P1D_E_F)) {
			dprintk("DE-Engage mux for direct gain reg control");
			en_pwm_rf_mux = 0;
		} else
			dprintk("Engage mux for PWM control");

		dib0090_write_reg(state, 0x32, (en_pwm_rf_mux << 12) | (en_pwm_rf_mux << 11));

		/* Set fast servo cutoff to start AGC; 0 = 1KHz ; 1 = 50Hz ; 2 = 150Hz ; 3 = 50KHz ; 4 = servo fast*/
		if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1)
			dib0090_write_reg(state, 0x04, 3);
		else
			dib0090_write_reg(state, 0x04, 1);
		dib0090_write_reg(state, 0x39, (1 << 10));	/* 0 gain by default */
	}
}
EXPORT_SYMBOL(dib0090_pwm_gain_reset);

/* Program the DC servo cutoff register (values 0..3 only) */
void dib0090_set_dc_servo(struct dvb_frontend *fe, u8 DC_servo_cutoff)
{
	struct dib0090_state *state = fe->tuner_priv;
	if (DC_servo_cutoff < 4)
		dib0090_write_reg(state, 0x04, DC_servo_cutoff);
}
EXPORT_SYMBOL(dib0090_set_dc_servo);

/* Read the slow ADC; SoC parts report the value shifted left by 2 */
static u32 dib0090_get_slow_adc_val(struct dib0090_state *state)
{
	u16 adc_val = dib0090_read_reg(state, 0x1d);
	if (state->identity.in_soc)
		adc_val >>= 2;
	return adc_val;
}

/*
 * AGC state machine: on CT_AGC_START select the band-appropriate ramps
 * and reset the gain integrators; afterwards measure WBD/ADC levels and
 * feed the errors to dib0090_gain_apply().  Returns the delay in 1/10 ms
 * until the next call.
 */
int dib0090_gain_control(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	enum frontend_tune_state *tune_state = &state->tune_state;
	int ret = 10;

	u16 wbd_val = 0;
	u8 apply_gain_immediatly = 1;
	s16 wbd_error = 0, adc_error = 0;

	if (*tune_state == CT_AGC_START) {
		state->agc_freeze = 0;
		dib0090_write_reg(state, 0x04, 0x0);

#ifdef CONFIG_BAND_SBAND
		if (state->current_band == BAND_SBAND) {
			dib0090_set_rframp(state, rf_ramp_sband);
			dib0090_set_bbramp(state, bb_ramp_boost);
		} else
#endif
#ifdef CONFIG_BAND_VHF
		if (state->current_band == BAND_VHF && !state->identity.p1g) {
			dib0090_set_rframp(state, rf_ramp_pwm_vhf);
			dib0090_set_bbramp(state, bb_ramp_pwm_normal);
		} else
#endif
#ifdef CONFIG_BAND_CBAND
		if (state->current_band == BAND_CBAND && !state->identity.p1g) {
			dib0090_set_rframp(state, rf_ramp_pwm_cband);
dib0090_set_bbramp(state, bb_ramp_pwm_normal);
		} else
#endif
		if ((state->current_band == BAND_CBAND || state->current_band == BAND_VHF) && state->identity.p1g) {
			dib0090_set_rframp(state, rf_ramp_pwm_cband_7090p);
			dib0090_set_bbramp(state, bb_ramp_pwm_normal_socs);
		} else {
			dib0090_set_rframp(state, rf_ramp_pwm_uhf);
			dib0090_set_bbramp(state, bb_ramp_pwm_normal);
		}

		dib0090_write_reg(state, 0x32, 0);
		dib0090_write_reg(state, 0x39, 0);

		dib0090_wbd_target(state, state->current_rf);

		/* start from max RF gain and the midpoint of the total gain range */
		state->rf_gain_limit = state->rf_ramp[0] << WBD_ALPHA;
		state->current_gain = ((state->rf_ramp[0] + state->bb_ramp[0]) / 2) << GAIN_ALPHA;

		*tune_state = CT_AGC_STEP_0;
	} else if (!state->agc_freeze) {
		s16 wbd = 0, i, cnt;

		int adc;
		wbd_val = dib0090_get_slow_adc_val(state);

		/* average 5 WBD samples on the first step, 1 afterwards */
		if (*tune_state == CT_AGC_STEP_0)
			cnt = 5;
		else
			cnt = 1;

		for (i = 0; i < cnt; i++) {
			wbd_val = dib0090_get_slow_adc_val(state);
			wbd += dib0090_wbd_to_db(state, wbd_val);
		}
		wbd /= cnt;
		wbd_error = state->wbd_target - wbd;

		if (*tune_state == CT_AGC_STEP_0) {
			if (wbd_error < 0 && state->rf_gain_limit > 0 && !state->identity.p1g) {
#ifdef CONFIG_BAND_CBAND
				/* in case of CBAND tune reduce first the lt_gain2 before adjusting the RF gain */
				u8 ltg2 = (state->rf_lt_def >> 10) & 0x7;
				if (state->current_band == BAND_CBAND && ltg2) {
					ltg2 >>= 1;
					state->rf_lt_def &= ltg2 << 10;	/* reduce in 3 steps from 7 to 0 */
				}
#endif
			} else {
				state->agc_step = 0;
				*tune_state = CT_AGC_STEP_1;
			}
		} else {
			/* calc the adc power */
			adc = state->config->get_adc_power(fe);
			adc = (adc * ((s32) 355774) + (((s32) 1) << 20)) >> 21;	/* included in [0:-700] */

			adc_error = (s16) (((s32) ADC_TARGET) - adc);
#ifdef CONFIG_STANDARD_DAB
			if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB)
				adc_error -= 10;
#endif
#ifdef CONFIG_STANDARD_DVBT
			if (state->fe->dtv_property_cache.delivery_system == STANDARD_DVBT &&
					(state->fe->dtv_property_cache.modulation == QAM_64 || state->fe->dtv_property_cache.modulation == QAM_16))
				adc_error += 60;
#endif
#ifdef CONFIG_SYS_ISDBT
			/* raise the target when any active ISDB-T layer uses QAM-16/64 */
			if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT) && (((state->fe->dtv_property_cache.layer[0].segment_count > 0)
							&&
							((state->fe->dtv_property_cache.layer[0].modulation ==
							  QAM_64)
							 || (state->fe->dtv_property_cache.layer[0].modulation == QAM_16)))
						||
						((state->fe->dtv_property_cache.layer[1].segment_count > 0)
						 &&
						 ((state->fe->dtv_property_cache.layer[1].modulation == QAM_64)
						  || (state->fe->dtv_property_cache.layer[1].modulation == QAM_16)))
						||
						((state->fe->dtv_property_cache.layer[2].segment_count > 0)
						 &&
						 ((state->fe->dtv_property_cache.layer[2].modulation == QAM_64)
						  || (state->fe->dtv_property_cache.layer[2].modulation == QAM_16)))
						)
			   )
				adc_error += 60;
#endif

			if (*tune_state == CT_AGC_STEP_1) {	/* quickly go to the correct range of the ADC power */
				if (ABS(adc_error) < 50 || state->agc_step++ > 5) {

#ifdef CONFIG_STANDARD_DAB
					if (state->fe->dtv_property_cache.delivery_system == STANDARD_DAB) {
						dib0090_write_reg(state, 0x02, (1 << 15) | (15 << 11) | (31 << 6) | (63));	/* cap value = 63 : narrow BB filter : Fc = 1.8MHz */
						dib0090_write_reg(state, 0x04, 0x0);
					} else
#endif
					{
						dib0090_write_reg(state, 0x02, (1 << 15) | (3 << 11) | (6 << 6) | (32));
						dib0090_write_reg(state, 0x04, 0x01);	/*0 = 1KHz ; 1 = 150Hz ; 2 = 50Hz ; 3 = 50KHz ; 4 = servo fast */
					}

					*tune_state = CT_AGC_STOP;
				}
			} else {
				/* everything higher than or equal to CT_AGC_STOP means tracking */
				ret = 100;	/* 10ms interval */
				apply_gain_immediatly = 0;
			}
		}
#ifdef DEBUG_AGC
		dprintk
			("tune state %d, ADC = %3ddB (ADC err %3d) WBD %3ddB (WBD err %3d, WBD val SADC: %4d), RFGainLimit (TOP): %3d, signal: %3ddBm",
			 (u32) *tune_state, (u32) adc, (u32) adc_error, (u32) wbd, (u32) wbd_error, (u32) wbd_val, (u32) state->rf_gain_limit >> WBD_ALPHA,
			 (s32) 200 + adc - (state->current_gain >> GAIN_ALPHA));
#endif
	}

	/* apply gain */
	if (!state->agc_freeze)
		dib0090_gain_apply(state, adc_error, wbd_error, apply_gain_immediatly);
	return ret;
}
EXPORT_SYMBOL(dib0090_gain_control);

void
dib0090_get_current_gain(struct dvb_frontend *fe, u16 * rf, u16 * bb, u16 * rf_gain_limit, u16 * rflt)
{
	/* Report the current RF/BB gain split and limits; NULL outputs are skipped. */
	struct dib0090_state *state = fe->tuner_priv;
	if (rf)
		*rf = state->gain[0];
	if (bb)
		*bb = state->gain[1];
	if (rf_gain_limit)
		*rf_gain_limit = state->rf_gain_limit;
	if (rflt)
		*rflt = (state->rf_lt_def >> 10) & 0x7;
}
EXPORT_SYMBOL(dib0090_get_current_gain);

/*
 * Compute the temperature-compensated WBD target for the current
 * frequency, program the WBD mux gain and return the raw target value.
 */
u16 dib0090_get_wbd_target(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	u32 f_MHz = state->fe->dtv_property_cache.frequency / 1000000;
	s32 current_temp = state->temperature;
	s32 wbd_thot, wbd_tcold;
	const struct dib0090_wbd_slope *wbd = state->current_wbd_table;

	/* pick the table entry covering the tuned frequency */
	while (f_MHz > wbd->max_freq)
		wbd++;

	dprintk("using wbd-table-entry with max freq %d", wbd->max_freq);

	if (current_temp < 0)
		current_temp = 0;
	if (current_temp > 128)
		current_temp = 128;

	state->wbdmux &= ~(7 << 13);
	if (wbd->wbd_gain != 0)
		state->wbdmux |= (wbd->wbd_gain << 13);
	else
		state->wbdmux |= (4 << 13);

	dib0090_write_reg(state, 0x10, state->wbdmux);

	/* interpolate between the hot and cold calibration lines */
	wbd_thot = wbd->offset_hot - (((u32) wbd->slope_hot * f_MHz) >> 6);
	wbd_tcold = wbd->offset_cold - (((u32) wbd->slope_cold * f_MHz) >> 6);

	wbd_tcold += ((wbd_thot - wbd_tcold) * current_temp) >> 7;

	state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + wbd_tcold);
	dprintk("wbd-target: %d dB", (u32) state->wbd_target);
	dprintk("wbd offset applied is %d", wbd_tcold);

	return state->wbd_offset + wbd_tcold;
}
EXPORT_SYMBOL(dib0090_get_wbd_target);

/* Return the calibrated WBD offset */
u16 dib0090_get_wbd_offset(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;
	return state->wbd_offset;
}
EXPORT_SYMBOL(dib0090_get_wbd_offset);

/* Drive the three antenna/input switch bits in register 0x0b */
int dib0090_set_switch(struct dvb_frontend *fe, u8 sw1, u8 sw2, u8 sw3)
{
	struct dib0090_state *state = fe->tuner_priv;

	dib0090_write_reg(state, 0x0b, (dib0090_read_reg(state, 0x0b) & 0xfff8)
			| ((sw3 & 1) << 2) | ((sw2 & 1) << 1) | (sw1 & 1));

	return 0;
}
EXPORT_SYMBOL(dib0090_set_switch);

/* Enable/disable the VGA (bit 15 of register 0x09) */
int dib0090_set_vga(struct dvb_frontend *fe, u8 onoff)
{
	struct dib0090_state *state = fe->tuner_priv;

	dib0090_write_reg(state, 0x09, (dib0090_read_reg(state, 0x09) & 0x7fff)
			| ((onoff & 1) << 15));
	return 0;
}
EXPORT_SYMBOL(dib0090_set_vga);

/*
 * Select between the sensitivity and ACI C-band RF ramps on dib7090P
 * parts and re-run the PWM gain reset.  Returns -ENODEV on other chips.
 */
int dib0090_update_rframp_7090(struct dvb_frontend *fe, u8 cfg_sensitivity)
{
	struct dib0090_state *state = fe->tuner_priv;

	if ((!state->identity.p1g) || (!state->identity.in_soc)
			|| ((state->identity.version != SOC_7090_P1G_21R1)
				&& (state->identity.version != SOC_7090_P1G_11R1))) {
		dprintk("%s() function can only be used for dib7090P", __func__);
		return -ENODEV;
	}

	if (cfg_sensitivity)
		state->rf_ramp = (const u16 *)&rf_ramp_pwm_cband_7090e_sensitivity;
	else
		state->rf_ramp = (const u16 *)&rf_ramp_pwm_cband_7090e_aci;
	dib0090_pwm_gain_reset(fe);

	return 0;
}
EXPORT_SYMBOL(dib0090_update_rframp_7090);

/*
 * Register init script: "<count>, <start addr>, <values...>" groups,
 * terminated by a zero count (consumed by dib0090_set_default_config).
 */
static const u16 dib0090_defaults[] = {

	25, 0x01,
	0x0000,
	0x99a0,
	0x6008,
	0x0000,
	0x8bcb,
	0x0000,
	0x0405,
	0x0000,
	0x0000,
	0x0000,
	0xb802,
	0x0300,
	0x2d12,
	0xbac0,
	0x7c00,
	0xdbb9,
	0x0954,
	0x0743,
	0x8000,
	0x0001,
	0x0040,
	0x0100,
	0x0000,
	0xe910,
	0x149e,

	1, 0x1c,
	0xff2d,

	1, 0x39,
	0x0000,

	2, 0x1e,
	0x07FF,
	0x0007,

	1, 0x24,
	EN_UHF | EN_CRYSTAL,

	2, 0x3c,
	0x3ff,
	0x111,
	0
};

/* Extra register overrides applied on P1G silicon */
static const u16 dib0090_p1g_additionnal_defaults[] = {
	1, 0x05,
	0xabcd,

	1, 0x11,
	0x00b4,

	1, 0x1c,
	0xfffd,

	1, 0x40,
	0x108,
	0
};

/* Replay a "<count>, <addr>, <values...>" script into consecutive registers */
static void dib0090_set_default_config(struct dib0090_state *state, const u16 * n)
{
	u16 l, r;

	l = pgm_read_word(n++);
	while (l) {
		r = pgm_read_word(n++);
		do {
			dib0090_write_reg(state, r, pgm_read_word(n++));
			r++;
		} while (--l);
		l = pgm_read_word(n++);
	}
}

#define CAP_VALUE_MIN (u8) 9
#define CAP_VALUE_MAX (u8) 40
#define HR_MIN	(u8) 25
#define HR_MAX	(u8) 40
#define POLY_MIN	(u8) 0
#define POLY_MAX	(u8) 8

/*
 * Read the factory EFUSE calibration words, sanity-check/clamp the cap,
 * HR and poly fields and program registers 0x13 and 0x02 accordingly.
 */
static void dib0090_set_EFUSE(struct dib0090_state *state)
{
	u8 c, h, n;
	u16 e2, e4;
	u16 cal;

	e2 = dib0090_read_reg(state, 0x26);
	e4 = dib0090_read_reg(state, 0x28);

	if ((state->identity.version == P1D_E_F) ||
			(state->identity.version == P1G) || (e2 == 0xffff)) {
		/* EFUSE not programmed: derive values from a live calibration read */
		dib0090_write_reg(state, 0x22, 0x10);
		cal = (dib0090_read_reg(state, 0x22) >> 6) & 0x3ff;

		if ((cal < 670) || (cal == 1023))
			cal = 850;
		n = 165 - ((cal * 10)>>6) ;
		e2 = e4 = (3<<12) | (34<<6) | (n);
	}

	if (e2 != e4)
		e2 &= e4; /* Remove the redundancy  */

	if (e2 != 0xffff) {
		c = e2 & 0x3f;
		n = (e2 >> 12) & 0xf;
		h = (e2 >> 6) & 0x3f;

		if ((c >= CAP_VALUE_MAX) || (c <= CAP_VALUE_MIN))
			c = 32;
		else
			c += 14;
		if ((h >= HR_MAX) || (h <= HR_MIN))
			h = 34;
		if ((n >= POLY_MAX) || (n <= POLY_MIN))
			n = 3;

		dib0090_write_reg(state, 0x13, (h << 10)) ;
		e2 = (n<<11) | ((h>>2)<<6) | c;
		dib0090_write_reg(state, 0x2, e2) ; /* Load the BB_2 */
	}
}

/*
 * Full chip reset: identify the part, re-enable the required power
 * domains, replay the default register scripts, apply EFUSE data and
 * configure the crystal mode.  Returns 0 or -EIO if identify fails.
 */
static int dib0090_reset(struct dvb_frontend *fe)
{
	struct dib0090_state *state = fe->tuner_priv;

	dib0090_reset_digital(fe, state->config);
	if (dib0090_identify(fe) < 0)
		return -EIO;

#ifdef CONFIG_TUNER_DIB0090_P1B_SUPPORT
	if (!(state->identity.version & 0x1))	/* it is P1B - reset is already done */
		return 0;
#endif

	if (!state->identity.in_soc) {
		if ((dib0090_read_reg(state, 0x1a) >> 5) & 0x2)
			dib0090_write_reg(state, 0x1b, (EN_IQADC | EN_BB | EN_BIAS | EN_DIGCLK | EN_PLL | EN_CRYSTAL));
		else
			dib0090_write_reg(state, 0x1b, (EN_DIGCLK | EN_PLL | EN_CRYSTAL));
	}

	dib0090_set_default_config(state, dib0090_defaults);

	if (state->identity.in_soc)
		dib0090_write_reg(state, 0x18, 0x2910);  /* charge pump current = 0 */

	if (state->identity.p1g)
		dib0090_set_default_config(state, dib0090_p1g_additionnal_defaults);

	/* Update the efuse : Only available for KROSUS > P1C  and SOC as well*/
	if (((state->identity.version & 0x1f) >= P1D_E_F) || (state->identity.in_soc))
		dib0090_set_EFUSE(state);

	/* Configure in function of the crystal */
	if (state->config->force_crystal_mode != 0)
		dib0090_write_reg(state, 0x14,
				state->config->force_crystal_mode & 3);
	else if (state->config->io.clock_khz >= 24000)
		dib0090_write_reg(state, 0x14, 1);
	else
		dib0090_write_reg(state, 0x14, 2);
	dprintk("Pll lock : %d", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);

	state->calibrate = DC_CAL | WBD_CAL | TEMP_CAL;	/* enable 
iq-offset-calibration and wbd-calibration when tuning next time */

	return 0;
}

#define steps(u) (((u) > 15) ? ((u)-16) : (u))
#define INTERN_WAIT 10

/*
 * Measure the ADC offset by driving the trim DAC positive then negative
 * (register 0x1f) and accumulating the difference into state->adc_diff.
 * Returns the wait time (in 1/10 ms) before the next step.
 */
static int dib0090_get_offset(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	int ret = INTERN_WAIT * 10;

	switch (*tune_state) {
	case CT_TUNER_STEP_2:
		/* Turns to positive */
		dib0090_write_reg(state, 0x1f, 0x7);
		*tune_state = CT_TUNER_STEP_3;
		break;

	case CT_TUNER_STEP_3:
		state->adc_diff = dib0090_read_reg(state, 0x1d);

		/* Turns to negative */
		dib0090_write_reg(state, 0x1f, 0x4);
		*tune_state = CT_TUNER_STEP_4;
		break;

	case CT_TUNER_STEP_4:
		state->adc_diff -= dib0090_read_reg(state, 0x1d);
		*tune_state = CT_TUNER_STEP_5;
		ret = 0;
		break;

	default:
		break;
	}

	return ret;
}

/* One DC-calibration pass: target register/bitfield plus the BB_1 setup */
struct dc_calibration {
	u8 addr;
	u8 offset;
	u8 pga:1;
	u16 bb1;
	u8 i:1;
};

static const struct dc_calibration dc_table[] = {
	/* Step1 BB gain1= 26 with boost 1, gain 2 = 0 */
	{0x06, 5, 1, (1 << 13) | (0 << 8) | (26 << 3), 1},
	{0x07, 11, 1, (1 << 13) | (0 << 8) | (26 << 3), 0},
	/* Step 2 BB gain 1 = 26 with boost = 1 & gain 2 = 29 */
	{0x06, 0, 0, (1 << 13) | (29 << 8) | (26 << 3), 1},
	{0x06, 10, 0, (1 << 13) | (29 << 8) | (26 << 3), 0},
	{0},
};

static const struct dc_calibration dc_p1g_table[] = {
	/* Step1 BB gain1= 26 with boost 1, gain 2 = 0 */
	/* addr ; trim reg offset ; pga ; CTRL_BB1 value ; i or q */
	{0x06, 5, 1, (1 << 13) | (0 << 8) | (15 << 3), 1},
	{0x07, 11, 1, (1 << 13) | (0 << 8) | (15 << 3), 0},
	/* Step 2 BB gain 1 = 26 with boost = 1 & gain 2 = 29 */
	{0x06, 0, 0, (1 << 13) | (29 << 8) | (15 << 3), 1},
	{0x06, 10, 0, (1 << 13) | (29 << 8) | (15 << 3), 0},
	{0},
};

/* Write the current 5-bit trim step into the register/offset selected by state->dc */
static void dib0090_set_trim(struct dib0090_state *state)
{
	u16 *val;

	if (state->dc->addr == 0x07)
		val = &state->bb7;
	else
		val = &state->bb6;

	*val &= ~(0x1f << state->dc->offset);
	*val |= state->step << state->dc->offset;

	dib0090_write_reg(state, state->dc->addr, *val);
}

/*
 * DC offset calibration state machine: for each entry of the dc table,
 * step the trim DAC until the ADC difference changes sign, then keep
 * the best step.  Returns the wait time (in 1/10 ms) until next call.
 */
static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	int ret = 0;
	u16 reg;

	switch (*tune_state) {
	case CT_TUNER_START:
		dprintk("Start DC offset calibration");

		/* force vcm2 = 0.8V */
		state->bb6 = 0;
		state->bb7 = 0x040d;

		/* the LNA AND LO are off */
		reg = dib0090_read_reg(state, 0x24) & 0x0ffb;	/* shutdown lna and lo */
		dib0090_write_reg(state, 0x24, reg);

		state->wbdmux = dib0090_read_reg(state, 0x10);
		dib0090_write_reg(state, 0x10, (state->wbdmux & ~(0xff << 3)) | (0x7 << 3) | 0x3);
		dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) & ~(1 << 14));

		state->dc = dc_table;

		if (state->identity.p1g)
			state->dc = dc_p1g_table;
		*tune_state = CT_TUNER_STEP_0;

		/* fall through */

	case CT_TUNER_STEP_0:
		dprintk("Sart/continue DC calibration for %s path", (state->dc->i == 1) ? "I" : "Q");
		dib0090_write_reg(state, 0x01, state->dc->bb1);
		dib0090_write_reg(state, 0x07, state->bb7 | (state->dc->i << 7));

		state->step = 0;
		state->min_adc_diff = 1023;
		*tune_state = CT_TUNER_STEP_1;
		ret = 50;
		break;

	case CT_TUNER_STEP_1:
		dib0090_set_trim(state);
		*tune_state = CT_TUNER_STEP_2;
		break;

	case CT_TUNER_STEP_2:
	case CT_TUNER_STEP_3:
	case CT_TUNER_STEP_4:
		ret = dib0090_get_offset(state, tune_state);
		break;

	case CT_TUNER_STEP_5:	/* found an offset */
		dprintk("adc_diff = %d, current step= %d", (u32) state->adc_diff, state->step);
		if (state->step == 0 && state->adc_diff < 0) {
			state->min_adc_diff = -1023;
			dprintk("Change of sign of the minimum adc diff");
		}

		dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d", state->adc_diff, state->min_adc_diff, state->step);

		/* first turn for this frequency */
		if (state->step == 0) {
			if (state->dc->pga && state->adc_diff < 0)
				state->step = 0x10;
			if (state->dc->pga == 0 && state->adc_diff > 0)
				state->step = 0x10;
		}

		/* Look for a change of Sign in the Adc_diff.min_adc_diff is used to STORE the setp N-1 */
		if ((state->adc_diff & 0x8000) == (state->min_adc_diff & 0x8000) && steps(state->step) < 15) {
			/* stop search when the delta the sign is changing and Steps =15 and Step=0 is force for continuance */
			state->step++;
			state->min_adc_diff = state->adc_diff;
			*tune_state = CT_TUNER_STEP_1;
		} else {
			/* the minimum was what we have seen in the step before */
			if (ABS(state->adc_diff) > ABS(state->min_adc_diff)) {
				dprintk("Since adc_diff N = %d  > adc_diff step N-1 = %d, Come back one step", state->adc_diff, state->min_adc_diff);
				state->step--;
			}

			dib0090_set_trim(state);
			dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->dc->addr, state->adc_diff, state->step);

			state->dc++;
			if (state->dc->addr == 0)	/* done */
				*tune_state = CT_TUNER_STEP_6;
			else
				*tune_state = CT_TUNER_STEP_0;

		}
		break;

	case CT_TUNER_STEP_6:
		dib0090_write_reg(state, 0x07, state->bb7 & ~0x0008);
		dib0090_write_reg(state, 0x1f, 0x7);
		*tune_state = CT_TUNER_START;	/* reset done -> real tuning can now begin */
		state->calibrate &= ~DC_CAL;
	default:
		break;
	}
	return ret;
}

/*
 * WBD calibration: select the band-appropriate WBD gain, let the ADC
 * settle, then record the zero-signal reading as state->wbd_offset.
 * Returns the wait (in 1/10 ms) before the next call.
 */
static int dib0090_wbd_calibration(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	u8 wbd_gain;
	const struct dib0090_wbd_slope *wbd = state->current_wbd_table;

	switch (*tune_state) {
	case CT_TUNER_START:
		while (state->current_rf / 1000 > wbd->max_freq)
			wbd++;
		if (wbd->wbd_gain != 0)
			wbd_gain = wbd->wbd_gain;
		else {
			wbd_gain = 4;
#if defined(CONFIG_BAND_LBAND) || defined(CONFIG_BAND_SBAND)
			if ((state->current_band == BAND_LBAND) || (state->current_band == BAND_SBAND))
				wbd_gain = 2;
#endif
		}

		if (wbd_gain == state->wbd_calibration_gain) {	/* the WBD calibration has already been done */
			*tune_state = CT_TUNER_START;
			state->calibrate &= ~WBD_CAL;
			return 0;
		}

		dib0090_write_reg(state, 0x10, 0x1b81 | (1 << 10) | (wbd_gain << 13) | (1 << 3));

		dib0090_write_reg(state, 0x24, ((EN_UHF & 0x0fff) | (1 << 1)));
		*tune_state = CT_TUNER_STEP_0;
		state->wbd_calibration_gain = wbd_gain;
		return 90;	/* wait for the WBDMUX to switch and for the ADC to sample */

	case CT_TUNER_STEP_0:
		state->wbd_offset = dib0090_get_slow_adc_val(state);
		dprintk("WBD calibration offset = %d", state->wbd_offset);
		*tune_state = CT_TUNER_START;	/* reset done -> real tuning can now begin */
		state->calibrate &= ~WBD_CAL;
		break;

	default:
		break;
	}
	return 0;
}

/*
 * Program the BB filter bandwidth (bits 15:14 of BB_1) from the cached
 * channel bandwidth and (re)write the BB bias/trim registers.
 */
static void dib0090_set_bandwidth(struct dib0090_state *state)
{
	u16 tmp;

	if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 5000)
		tmp = (3 << 14);
	else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 6000)
		tmp = (2 << 14);
	else if (state->fe->dtv_property_cache.bandwidth_hz / 1000 <= 7000)
		tmp = (1 << 14);
	else
		tmp = (0 << 14);

	state->bb_1_def &= 0x3fff;
	state->bb_1_def |= tmp;

	dib0090_write_reg(state, 0x01, state->bb_1_def);	/* be sure that we have the right bb-filter */

	dib0090_write_reg(state, 0x03, 0x6008);	/* = 0x6008 : vcm3_trim = 1 ; filter2_gm1_trim = 8 ; filter2_cutoff_freq = 0 */
	dib0090_write_reg(state, 0x04, 0x1);	/* 0 = 1KHz ; 1 = 50Hz ; 2 = 150Hz ; 3 = 50KHz ; 4 = servo fast */
	if (state->identity.in_soc) {
		dib0090_write_reg(state, 0x05, 0x9bcf);	/* attenuator_ibias_tri = 2 ; input_stage_ibias_tr = 1 ; nc = 11 ; ext_gm_trim = 1 ; obuf_ibias_trim = 4 ; filter13_gm2_ibias_t = 15 */
	} else {
		dib0090_write_reg(state, 0x02, (5 << 11) | (8 << 6) | (22 & 0x3f));	/* 22 = cap_value */
		dib0090_write_reg(state, 0x05, 0xabcd);	/* = 0xabcd : attenuator_ibias_tri = 2 ; input_stage_ibias_tr = 2 ; nc = 11 ; ext_gm_trim = 1 ; obuf_ibias_trim = 4 ; filter13_gm2_ibias_t = 13 */
	}
}

/* PLL dividers per frequency range: {max_freq_kHz, switch, vco_band, hfdiv, topresc} */
static const struct dib0090_pll dib0090_pll_table[] = {
#ifdef CONFIG_BAND_CBAND
	{56000, 0, 9, 48, 6},
	{70000, 1, 9, 48, 6},
	{87000, 0, 8, 32, 4},
	{105000, 1, 8, 32, 4},
	{115000, 0, 7, 24, 6},
	{140000, 1, 7, 24, 6},
	{170000, 0, 6, 16, 4},
#endif
#ifdef CONFIG_BAND_VHF
	{200000, 1, 6, 16, 4},
	{230000, 0, 5, 12, 6},
	{280000, 1, 5, 12, 6},
	{340000, 0, 4, 8, 4},
	{380000, 1, 4, 8, 4},
	{450000, 0, 3, 6, 6},
#endif
#ifdef CONFIG_BAND_UHF
	{580000, 1, 3, 6, 6},
	{700000, 0, 2, 4, 4},
	{860000, 1, 2, 4, 4},
#endif
#ifdef CONFIG_BAND_LBAND
	{1800000, 1, 0, 2, 4},
#endif
#ifdef CONFIG_BAND_SBAND
	{2900000, 0, 14, 1, 4},
#endif
};

static const struct dib0090_tuning 
dib0090_tuning_table_fm_vhf_on_cband[] = {
#ifdef CONFIG_BAND_CBAND
	{184000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
	{227000, 4, 3, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
	{380000, 4, 7, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
#endif
#ifdef CONFIG_BAND_UHF
	{520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
#endif
#ifdef CONFIG_BAND_LBAND
	{1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
#endif
#ifdef CONFIG_BAND_SBAND
	{2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
	{2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
#endif
};

/* Default per-band tuning parameters for pre-P1G parts */
static const struct dib0090_tuning dib0090_tuning_table[] = {
#ifdef CONFIG_BAND_CBAND
	{170000, 4, 1, 15, 0x280, 0x2912, 0xb94e, EN_CAB},
#endif
#ifdef CONFIG_BAND_VHF
	{184000, 1, 1, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
	{227000, 1, 3, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
	{380000, 1, 7, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
#endif
#ifdef CONFIG_BAND_UHF
	{520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
#endif
#ifdef CONFIG_BAND_LBAND
	{1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
#endif
#ifdef CONFIG_BAND_SBAND
	{2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
	{2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
#endif
};

/* Per-band tuning parameters for P1G parts */
static const struct dib0090_tuning dib0090_p1g_tuning_table[] = {
#ifdef CONFIG_BAND_CBAND
	{170000, 4, 1, 0x820f, 0x300, 0x2d22, 0x82cb, EN_CAB},
#endif
#ifdef CONFIG_BAND_VHF
	{184000, 1, 1, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
	{227000, 1, 3, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
	{380000, 1, 7, 15, 0x300, 0x4d12, 0xb94e, EN_VHF},
#endif
#ifdef CONFIG_BAND_UHF
	{510000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{540000, 2, 1, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{600000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{630000, 2, 4, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{680000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{720000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
#endif
#ifdef CONFIG_BAND_LBAND
	{1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
#endif
#ifdef CONFIG_BAND_SBAND
	{2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
	{2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
#endif
};

/* PLL dividers per frequency range for P1G parts */
static const struct dib0090_pll dib0090_p1g_pll_table[] = {
#ifdef CONFIG_BAND_CBAND
	{57000, 0, 11, 48, 6},
	{70000, 1, 11, 48, 6},
	{86000, 0, 10, 32, 4},
	{105000, 1, 10, 32, 4},
	{115000, 0, 9, 24, 6},
	{140000, 1, 9, 24, 6},
	{170000, 0, 8, 16, 4},
#endif
#ifdef CONFIG_BAND_VHF
	{200000, 1, 8, 16, 4},
	{230000, 0, 7, 12, 6},
	{280000, 1, 7, 12, 6},
	{340000, 0, 6, 8, 4},
	{380000, 1, 6, 8, 4},
	{455000, 0, 5, 6, 6},
#endif
#ifdef CONFIG_BAND_UHF
	{580000, 1, 5, 6, 6},
	{680000, 0, 4, 4, 4},
	{860000, 1, 4, 4, 4},
#endif
#ifdef CONFIG_BAND_LBAND
	{1800000, 1, 2, 2, 4},
#endif
#ifdef CONFIG_BAND_SBAND
	{2900000, 0, 1, 1, 6},
#endif
};

/* P1G tuning parameters when FM/VHF is received through the C-band input */
static const struct dib0090_tuning dib0090_p1g_tuning_table_fm_vhf_on_cband[] = {
#ifdef CONFIG_BAND_CBAND
	{184000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB},
	{227000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB},
	{380000, 4, 3, 0x4187, 0x2c0, 0x2d22, 0x81cb, EN_CAB},
#endif
#ifdef CONFIG_BAND_UHF
	{520000, 2, 0, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{550000, 2, 2, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{650000, 2, 3, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{750000, 2, 5, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{850000, 2, 6, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
	{900000, 2, 7, 15, 0x300, 0x1d12, 0xb9ce, EN_UHF},
#endif
#ifdef CONFIG_BAND_LBAND
	{1500000, 4, 0, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1600000, 4, 1, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
	{1800000, 4, 3, 20, 0x300, 0x1912, 0x82c9, EN_LBD},
#endif
#ifdef CONFIG_BAND_SBAND
	{2300000, 1, 4, 20, 0x300, 0x2d2A, 0x82c7, EN_SBD},
	{2900000, 1, 7, 20, 0x280, 0x2deb, 0x8347, EN_SBD},
#endif
};

/* C-band tuning parameters for the 7090 SoC */
static const struct dib0090_tuning dib0090_tuning_table_cband_7090[] = {
#ifdef CONFIG_BAND_CBAND
	{300000, 4, 3, 0x018F, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
	{380000, 4, 10, 0x018F, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
	{570000, 4, 10, 0x8190, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
	{858000, 4, 5, 0x8190, 0x2c0, 0x2d22, 0xb9ce, EN_CAB},
#endif
};

/* C-band tuning parameters for the 7090E, sensitivity-optimized variant */
static const struct dib0090_tuning dib0090_tuning_table_cband_7090e_sensitivity[] = {
#ifdef CONFIG_BAND_CBAND
	{ 300000,  0 ,  3,  0x8105, 0x2c0, 0x2d12, 0xb84e, EN_CAB },
	{ 380000,  0 ,  10, 0x810F, 0x2c0, 0x2d12, 0xb84e, EN_CAB },
	{ 600000,  0 ,  10, 0x815E, 0x280, 0x2d12, 0xb84e, EN_CAB },
	{ 660000,  0 ,  5,  0x85E3, 0x280, 0x2d12, 0xb84e, EN_CAB },
	{ 720000,  0 ,  5,  0x852E, 0x280, 0x2d12, 0xb84e, EN_CAB },
	{ 860000,  0 ,  4,  0x85E5, 0x280, 0x2d12, 0xb84e, EN_CAB },
#endif
};

/*
 * Switch the dib7090E between the sensitivity and ACI C-band tuning
 * tables and reprogram the LNA bias/tune registers for the current
 * frequency.  Returns -ENODEV on parts other than dib7090 P1G.
 */
int dib0090_update_tuning_table_7090(struct dvb_frontend *fe,
		u8 cfg_sensitivity)
{
	struct dib0090_state *state = fe->tuner_priv;
	const struct dib0090_tuning *tune =
		dib0090_tuning_table_cband_7090e_sensitivity;
	const struct dib0090_tuning dib0090_tuning_table_cband_7090e_aci[] = {
		{ 300000,  0 ,  3,  0x8165, 0x2c0, 0x2d12, 0xb84e, EN_CAB },
		{ 650000,  0 ,  4,  0x815B, 0x280, 0x2d12, 0xb84e, EN_CAB },
		{ 860000,  0 ,  5,  0x84EF, 0x280, 0x2d12, 0xb84e, EN_CAB },
	};

	if ((!state->identity.p1g) || (!state->identity.in_soc)
			|| ((state->identity.version != SOC_7090_P1G_21R1)
				&& (state->identity.version != SOC_7090_P1G_11R1))) {
		dprintk("%s() function can only be used for dib7090", __func__);
		return -ENODEV;
	}

	if (cfg_sensitivity)
		tune = dib0090_tuning_table_cband_7090e_sensitivity;
	else
		tune = dib0090_tuning_table_cband_7090e_aci;

	while (state->rf_request > tune->max_freq)
		tune++;

	dib0090_write_reg(state, 0x09, (dib0090_read_reg(state, 0x09) & 0x8000)
			| (tune->lna_bias & 0x7fff));
	dib0090_write_reg(state, 0x0b, (dib0090_read_reg(state, 0x0b) & 0xf83f)
			| ((tune->lna_tune << 6) & 0x07c0));
	return 0;
}
EXPORT_SYMBOL(dib0090_update_tuning_table_7090);

/*
 * VCO captrim search: binary search (or hardware auto-search on P1G)
 * for the capacitor trim that brings the tuning voltage closest to the
 * ADC target.  Returns the wait time (in 1/10 ms) until the next call.
 */
static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	int ret = 0;
	u16 lo4 = 0xe900;

	s16 adc_target;
	u16 adc;
	s8 step_sign;
	u8 force_soft_search = 0;

	if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
		force_soft_search = 1;

	if (*tune_state == CT_TUNER_START) {
		dprintk("Start Captrim search : %s", (force_soft_search == 1) ? "FORCE SOFT SEARCH" : "AUTO");
		dib0090_write_reg(state, 0x10, 0x2B1);
		dib0090_write_reg(state, 0x1e, 0x0032);

		if (!state->tuner_is_tuned) {
			/* prepare a complete captrim */
			if (!state->identity.p1g || force_soft_search)
				state->step = state->captrim = state->fcaptrim = 64;

			state->current_rf = state->rf_request;
		} else {	/* we are already tuned to this frequency - the configuration is correct  */
			if (!state->identity.p1g || force_soft_search) {
				/* do a minimal captrim even if the frequency has not changed */
				state->step = 4;
				state->captrim = state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7f;
			}
		}
		state->adc_diff = 3000;
		*tune_state = CT_TUNER_STEP_0;

	} else if (*tune_state == CT_TUNER_STEP_0) {
		if (state->identity.p1g && !force_soft_search) {
			/* hardware-assisted search */
			u8 ratio = 31;

			dib0090_write_reg(state, 0x40, (3 << 7) | (ratio << 2) | (1 << 1) | 1);
			dib0090_read_reg(state, 0x40);
			ret = 50;
		} else {
			state->step /= 2;
			dib0090_write_reg(state, 0x18, lo4 | state->captrim);

			if (state->identity.in_soc)
				ret = 25;
		}
		*tune_state = CT_TUNER_STEP_1;

	} else if (*tune_state == CT_TUNER_STEP_1) {
		if (state->identity.p1g && !force_soft_search) {
			dib0090_write_reg(state, 0x40, 0x18c | (0 << 1) | 0);
			dib0090_read_reg(state, 0x40);

			state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7F;
			dprintk("***Final Captrim= 0x%x", state->fcaptrim);
			*tune_state = CT_TUNER_STEP_3;

		} else {
			/* MERGE for all krosus before P1G */
			adc = dib0090_get_slow_adc_val(state);
			dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024);

			if (state->rest == 0 || state->identity.in_soc) {	/* Just for 8090P SOCS where auto captrim HW bug : TO CHECK IN ACI for SOCS !!! if 400 for 8090p SOC => tune issue !!! */
				adc_target = 200;
			} else
				adc_target = 400;

			if (adc >= adc_target) {
				adc -= adc_target;
				step_sign = -1;
			} else {
				adc = adc_target - adc;
				step_sign = 1;
			}

			if (adc < state->adc_diff) {
				dprintk("CAPTRIM=%d is closer to target (%d/%d)", (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
				state->adc_diff = adc;
				state->fcaptrim = state->captrim;
			}

			state->captrim += step_sign * state->step;
			if (state->step >= 1)
				*tune_state = CT_TUNER_STEP_0;
			else
				*tune_state = CT_TUNER_STEP_2;

			ret = 25;
		}
	} else if (*tune_state == CT_TUNER_STEP_2) {	/* this step is only used by krosus < P1G */
		/*write the final cptrim config */
		dib0090_write_reg(state, 0x18, lo4 | state->fcaptrim);

		*tune_state = CT_TUNER_STEP_3;

	} else if (*tune_state == CT_TUNER_STEP_3) {
		state->calibrate &= ~CAPTRIM_CAL;
		*tune_state = CT_TUNER_STEP_0;
	}

	return ret;
}

/*
 * Temperature measurement: route the temperature sensor through the
 * WBD mux, enable the sensor bias and sample the ADC.
 */
static int dib0090_get_temperature(struct dib0090_state *state, enum frontend_tune_state *tune_state)
{
	int ret = 15;
	s16 val;

	switch (*tune_state) {
	case CT_TUNER_START:
		state->wbdmux = dib0090_read_reg(state, 0x10);
		dib0090_write_reg(state, 0x10, (state->wbdmux & ~(0xff << 3)) | (0x8 << 3));

		state->bias = dib0090_read_reg(state, 0x13);
		dib0090_write_reg(state, 0x13, state->bias | (0x3 << 8));

		*tune_state = CT_TUNER_STEP_0;
		/* wait for the WBDMUX to switch and for the ADC to sample */
		break;

	case CT_TUNER_STEP_0:
		state->adc_diff = 
dib0090_get_slow_adc_val(state); dib0090_write_reg(state, 0x13, (state->bias & ~(0x3 << 8)) | (0x2 << 8)); *tune_state = CT_TUNER_STEP_1; break; case CT_TUNER_STEP_1: val = dib0090_get_slow_adc_val(state); state->temperature = ((s16) ((val - state->adc_diff) * 180) >> 8) + 55; dprintk("temperature: %d C", state->temperature - 30); *tune_state = CT_TUNER_STEP_2; break; case CT_TUNER_STEP_2: dib0090_write_reg(state, 0x13, state->bias); dib0090_write_reg(state, 0x10, state->wbdmux); /* write back original WBDMUX */ *tune_state = CT_TUNER_START; state->calibrate &= ~TEMP_CAL; if (state->config->analog_output == 0) dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14)); break; default: ret = 0; break; } return ret; } #define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */ static int dib0090_tune(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; const struct dib0090_tuning *tune = state->current_tune_table_index; const struct dib0090_pll *pll = state->current_pll_table_index; enum frontend_tune_state *tune_state = &state->tune_state; u16 lo5, lo6, Den, tmp; u32 FBDiv, Rest, FREF, VCOF_kHz = 0; int ret = 10; /* 1ms is the default delay most of the time */ u8 c, i; /************************* VCO ***************************/ /* Default values for FG */ /* from these are needed : */ /* Cp,HFdiv,VCOband,SD,Num,Den,FB and REFDiv */ /* in any case we first need to do a calibration if needed */ if (*tune_state == CT_TUNER_START) { /* deactivate DataTX before some calibrations */ if (state->calibrate & (DC_CAL | TEMP_CAL | WBD_CAL)) dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) & ~(1 << 14)); else /* Activate DataTX in case a calibration has been done before */ if (state->config->analog_output == 0) dib0090_write_reg(state, 0x23, dib0090_read_reg(state, 0x23) | (1 << 14)); } if (state->calibrate & DC_CAL) return dib0090_dc_offset_calibration(state, tune_state); else if (state->calibrate & WBD_CAL) { if (state->current_rf == 0) 
state->current_rf = state->fe->dtv_property_cache.frequency / 1000; return dib0090_wbd_calibration(state, tune_state); } else if (state->calibrate & TEMP_CAL) return dib0090_get_temperature(state, tune_state); else if (state->calibrate & CAPTRIM_CAL) return dib0090_captrim_search(state, tune_state); if (*tune_state == CT_TUNER_START) { /* if soc and AGC pwm control, disengage mux to be able to R/W access to 0x01 register to set the right filter (cutoff_freq_select) during the tune sequence, otherwise, SOC SERPAR error when accessing to 0x01 */ if (state->config->use_pwm_agc && state->identity.in_soc) { tmp = dib0090_read_reg(state, 0x39); if ((tmp >> 10) & 0x1) dib0090_write_reg(state, 0x39, tmp & ~(1 << 10)); } state->current_band = (u8) BAND_OF_FREQUENCY(state->fe->dtv_property_cache.frequency / 1000); state->rf_request = state->fe->dtv_property_cache.frequency / 1000 + (state->current_band == BAND_UHF ? state->config->freq_offset_khz_uhf : state->config-> freq_offset_khz_vhf); /* in ISDB-T 1seg we shift tuning frequency */ if ((state->fe->dtv_property_cache.delivery_system == SYS_ISDBT && state->fe->dtv_property_cache.isdbt_sb_mode == 1 && state->fe->dtv_property_cache.isdbt_partial_reception == 0)) { const struct dib0090_low_if_offset_table *LUT_offset = state->config->low_if; u8 found_offset = 0; u32 margin_khz = 100; if (LUT_offset != NULL) { while (LUT_offset->RF_freq != 0xffff) { if (((state->rf_request > (LUT_offset->RF_freq - margin_khz)) && (state->rf_request < (LUT_offset->RF_freq + margin_khz))) && LUT_offset->std == state->fe->dtv_property_cache.delivery_system) { state->rf_request += LUT_offset->offset_khz; found_offset = 1; break; } LUT_offset++; } } if (found_offset == 0) state->rf_request += 400; } if (state->current_rf != state->rf_request || (state->current_standard != state->fe->dtv_property_cache.delivery_system)) { state->tuner_is_tuned = 0; state->current_rf = 0; state->current_standard = 0; tune = dib0090_tuning_table; if 
(state->identity.p1g) tune = dib0090_p1g_tuning_table; tmp = (state->identity.version >> 5) & 0x7; if (state->identity.in_soc) { if (state->config->force_cband_input) { /* Use the CBAND input for all band */ if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF || state->current_band & BAND_UHF) { state->current_band = BAND_CBAND; if (state->config->is_dib7090e) tune = dib0090_tuning_table_cband_7090e_sensitivity; else tune = dib0090_tuning_table_cband_7090; } } else { /* Use the CBAND input for all band under UHF */ if (state->current_band & BAND_CBAND || state->current_band & BAND_FM || state->current_band & BAND_VHF) { state->current_band = BAND_CBAND; if (state->config->is_dib7090e) tune = dib0090_tuning_table_cband_7090e_sensitivity; else tune = dib0090_tuning_table_cband_7090; } } } else if (tmp == 0x4 || tmp == 0x7) { /* CBAND tuner version for VHF */ if (state->current_band == BAND_FM || state->current_band == BAND_CBAND || state->current_band == BAND_VHF) { state->current_band = BAND_CBAND; /* Force CBAND */ tune = dib0090_tuning_table_fm_vhf_on_cband; if (state->identity.p1g) tune = dib0090_p1g_tuning_table_fm_vhf_on_cband; } } pll = dib0090_pll_table; if (state->identity.p1g) pll = dib0090_p1g_pll_table; /* Look for the interval */ while (state->rf_request > tune->max_freq) tune++; while (state->rf_request > pll->max_freq) pll++; state->current_tune_table_index = tune; state->current_pll_table_index = pll; dib0090_write_reg(state, 0x0b, 0xb800 | (tune->switch_trim)); VCOF_kHz = (pll->hfdiv * state->rf_request) * 2; FREF = state->config->io.clock_khz; if (state->config->fref_clock_ratio != 0) FREF /= state->config->fref_clock_ratio; FBDiv = (VCOF_kHz / pll->topresc / FREF); Rest = (VCOF_kHz / pll->topresc) - FBDiv * FREF; if (Rest < LPF) Rest = 0; else if (Rest < 2 * LPF) Rest = 2 * LPF; else if (Rest > (FREF - LPF)) { Rest = 0; FBDiv += 1; } else if (Rest > (FREF - 2 * LPF)) Rest = FREF - 2 * LPF; Rest = 
(Rest * 6528) / (FREF / 10); state->rest = Rest; /* external loop filter, otherwise: * lo5 = (0 << 15) | (0 << 12) | (0 << 11) | (3 << 9) | (4 << 6) | (3 << 4) | 4; * lo6 = 0x0e34 */ if (Rest == 0) { if (pll->vco_band) lo5 = 0x049f; else lo5 = 0x041f; } else { if (pll->vco_band) lo5 = 0x049e; else if (state->config->analog_output) lo5 = 0x041d; else lo5 = 0x041c; } if (state->identity.p1g) { /* Bias is done automatically in P1G */ if (state->identity.in_soc) { if (state->identity.version == SOC_8090_P1G_11R1) lo5 = 0x46f; else lo5 = 0x42f; } else lo5 = 0x42c; } lo5 |= (pll->hfdiv_code << 11) | (pll->vco_band << 7); /* bit 15 is the split to the slave, we do not do it here */ if (!state->config->io.pll_int_loop_filt) { if (state->identity.in_soc) lo6 = 0xff98; else if (state->identity.p1g || (Rest == 0)) lo6 = 0xfff8; else lo6 = 0xff28; } else lo6 = (state->config->io.pll_int_loop_filt << 3); Den = 1; if (Rest > 0) { if (state->config->analog_output) lo6 |= (1 << 2) | 2; else { if (state->identity.in_soc) lo6 |= (1 << 2) | 2; else lo6 |= (1 << 2) | 2; } Den = 255; } dib0090_write_reg(state, 0x15, (u16) FBDiv); if (state->config->fref_clock_ratio != 0) dib0090_write_reg(state, 0x16, (Den << 8) | state->config->fref_clock_ratio); else dib0090_write_reg(state, 0x16, (Den << 8) | 1); dib0090_write_reg(state, 0x17, (u16) Rest); dib0090_write_reg(state, 0x19, lo5); dib0090_write_reg(state, 0x1c, lo6); lo6 = tune->tuner_enable; if (state->config->analog_output) lo6 = (lo6 & 0xff9f) | 0x2; dib0090_write_reg(state, 0x24, lo6 | EN_LO | state->config->use_pwm_agc * EN_CRYSTAL); } state->current_rf = state->rf_request; state->current_standard = state->fe->dtv_property_cache.delivery_system; ret = 20; state->calibrate = CAPTRIM_CAL; /* captrim serach now */ } else if (*tune_state == CT_TUNER_STEP_0) { /* Warning : because of captrim cal, if you change this step, change it also in _cal.c file because it is the step following captrim cal state machine */ const struct 
dib0090_wbd_slope *wbd = state->current_wbd_table; while (state->current_rf / 1000 > wbd->max_freq) wbd++; dib0090_write_reg(state, 0x1e, 0x07ff); dprintk("Final Captrim: %d", (u32) state->fcaptrim); dprintk("HFDIV code: %d", (u32) pll->hfdiv_code); dprintk("VCO = %d", (u32) pll->vco_band); dprintk("VCOF in kHz: %d ((%d*%d) << 1))", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request); dprintk("REFDIV: %d, FREF: %d", (u32) 1, (u32) state->config->io.clock_khz); dprintk("FBDIV: %d, Rest: %d", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17)); dprintk("Num: %d, Den: %d, SD: %d", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8), (u32) dib0090_read_reg(state, 0x1c) & 0x3); #define WBD 0x781 /* 1 1 1 1 0000 0 0 1 */ c = 4; i = 3; if (wbd->wbd_gain != 0) c = wbd->wbd_gain; state->wbdmux = (c << 13) | (i << 11) | (WBD | (state->config->use_pwm_agc << 1)); dib0090_write_reg(state, 0x10, state->wbdmux); if ((tune->tuner_enable == EN_CAB) && state->identity.p1g) { dprintk("P1G : The cable band is selected and lna_tune = %d", tune->lna_tune); dib0090_write_reg(state, 0x09, tune->lna_bias); dib0090_write_reg(state, 0x0b, 0xb800 | (tune->lna_tune << 6) | (tune->switch_trim)); } else dib0090_write_reg(state, 0x09, (tune->lna_tune << 5) | tune->lna_bias); dib0090_write_reg(state, 0x0c, tune->v2i); dib0090_write_reg(state, 0x0d, tune->mix); dib0090_write_reg(state, 0x0e, tune->load); *tune_state = CT_TUNER_STEP_1; } else if (*tune_state == CT_TUNER_STEP_1) { /* initialize the lt gain register */ state->rf_lt_def = 0x7c00; dib0090_set_bandwidth(state); state->tuner_is_tuned = 1; state->calibrate |= WBD_CAL; state->calibrate |= TEMP_CAL; *tune_state = CT_TUNER_STOP; } else ret = FE_CALLBACK_TIME_NEVER; return ret; } static int dib0090_release(struct dvb_frontend *fe) { kfree(fe->tuner_priv); fe->tuner_priv = NULL; return 0; } enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend 
*fe) { struct dib0090_state *state = fe->tuner_priv; return state->tune_state; } EXPORT_SYMBOL(dib0090_get_tune_state); int dib0090_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state) { struct dib0090_state *state = fe->tuner_priv; state->tune_state = tune_state; return 0; } EXPORT_SYMBOL(dib0090_set_tune_state); static int dib0090_get_frequency(struct dvb_frontend *fe, u32 * frequency) { struct dib0090_state *state = fe->tuner_priv; *frequency = 1000 * state->current_rf; return 0; } static int dib0090_set_params(struct dvb_frontend *fe) { struct dib0090_state *state = fe->tuner_priv; u32 ret; state->tune_state = CT_TUNER_START; do { ret = dib0090_tune(fe); if (ret != FE_CALLBACK_TIME_NEVER) msleep(ret / 10); else break; } while (state->tune_state != CT_TUNER_STOP); return 0; } static const struct dvb_tuner_ops dib0090_ops = { .info = { .name = "DiBcom DiB0090", .frequency_min = 45000000, .frequency_max = 860000000, .frequency_step = 1000, }, .release = dib0090_release, .init = dib0090_wakeup, .sleep = dib0090_sleep, .set_params = dib0090_set_params, .get_frequency = dib0090_get_frequency, }; static const struct dvb_tuner_ops dib0090_fw_ops = { .info = { .name = "DiBcom DiB0090", .frequency_min = 45000000, .frequency_max = 860000000, .frequency_step = 1000, }, .release = dib0090_release, .init = NULL, .sleep = NULL, .set_params = NULL, .get_frequency = NULL, }; static const struct dib0090_wbd_slope dib0090_wbd_table_default[] = { {470, 0, 250, 0, 100, 4}, {860, 51, 866, 21, 375, 4}, {1700, 0, 800, 0, 850, 4}, {2900, 0, 250, 0, 100, 6}, {0xFFFF, 0, 0, 0, 0, 0}, }; struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config) { struct dib0090_state *st = kzalloc(sizeof(struct dib0090_state), GFP_KERNEL); if (st == NULL) return NULL; st->config = config; st->i2c = i2c; st->fe = fe; mutex_init(&st->i2c_buffer_lock); fe->tuner_priv = st; if (config->wbd == NULL) st->current_wbd_table 
= dib0090_wbd_table_default; else st->current_wbd_table = config->wbd; if (dib0090_reset(fe) != 0) goto free_mem; printk(KERN_INFO "DiB0090: successfully identified\n"); memcpy(&fe->ops.tuner_ops, &dib0090_ops, sizeof(struct dvb_tuner_ops)); return fe; free_mem: kfree(st); fe->tuner_priv = NULL; return NULL; } EXPORT_SYMBOL(dib0090_register); struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_adapter *i2c, const struct dib0090_config *config) { struct dib0090_fw_state *st = kzalloc(sizeof(struct dib0090_fw_state), GFP_KERNEL); if (st == NULL) return NULL; st->config = config; st->i2c = i2c; st->fe = fe; mutex_init(&st->i2c_buffer_lock); fe->tuner_priv = st; if (dib0090_fw_reset_digital(fe, st->config) != 0) goto free_mem; dprintk("DiB0090 FW: successfully identified"); memcpy(&fe->ops.tuner_ops, &dib0090_fw_ops, sizeof(struct dvb_tuner_ops)); return fe; free_mem: kfree(st); fe->tuner_priv = NULL; return NULL; } EXPORT_SYMBOL(dib0090_fw_register); MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>"); MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner"); MODULE_LICENSE("GPL");
gpl-2.0
k2wl/KernelEvolution
drivers/power/isp1704_charger.c
2947
12855
/* * ISP1704 USB Charger Detection driver * * Copyright (C) 2010 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/delay.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/power/isp1704_charger.h> /* Vendor specific Power Control register */ #define ISP1704_PWR_CTRL 0x3d #define ISP1704_PWR_CTRL_SWCTRL (1 << 0) #define ISP1704_PWR_CTRL_DET_COMP (1 << 1) #define ISP1704_PWR_CTRL_BVALID_RISE (1 << 2) #define ISP1704_PWR_CTRL_BVALID_FALL (1 << 3) #define ISP1704_PWR_CTRL_DP_WKPU_EN (1 << 4) #define ISP1704_PWR_CTRL_VDAT_DET (1 << 5) #define ISP1704_PWR_CTRL_DPVSRC_EN (1 << 6) #define ISP1704_PWR_CTRL_HWDETECT (1 << 7) #define NXP_VENDOR_ID 0x04cc static u16 isp170x_id[] = { 0x1704, 0x1707, }; struct isp1704_charger { struct device *dev; struct power_supply psy; struct otg_transceiver *otg; struct notifier_block nb; struct work_struct work; /* properties */ char model[8]; unsigned present:1; unsigned online:1; unsigned current_max; /* temp storage variables */ unsigned long event; 
unsigned max_power; }; /* * Disable/enable the power from the isp1704 if a function for it * has been provided with platform data. */ static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on) { struct isp1704_charger_data *board = isp->dev->platform_data; if (board->set_power) board->set_power(on); } /* * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB * chargers). * * REVISIT: The method is defined in Battery Charging Specification and is * applicable to any ULPI transceiver. Nothing isp170x specific here. */ static inline int isp1704_charger_type(struct isp1704_charger *isp) { u8 reg; u8 func_ctrl; u8 otg_ctrl; int type = POWER_SUPPLY_TYPE_USB_DCP; func_ctrl = otg_io_read(isp->otg, ULPI_FUNC_CTRL); otg_ctrl = otg_io_read(isp->otg, ULPI_OTG_CTRL); /* disable pulldowns */ reg = ULPI_OTG_CTRL_DM_PULLDOWN | ULPI_OTG_CTRL_DP_PULLDOWN; otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), reg); /* full speed */ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL), ULPI_FUNC_CTRL_XCVRSEL_MASK); otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), ULPI_FUNC_CTRL_FULL_SPEED); /* Enable strong pull-up on DP (1.5K) and reset */ reg = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET; otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), reg); usleep_range(1000, 2000); reg = otg_io_read(isp->otg, ULPI_DEBUG); if ((reg & 3) != 3) type = POWER_SUPPLY_TYPE_USB_CDP; /* recover original state */ otg_io_write(isp->otg, ULPI_FUNC_CTRL, func_ctrl); otg_io_write(isp->otg, ULPI_OTG_CTRL, otg_ctrl); return type; } /* * ISP1704 detects PS/2 adapters as charger. To make sure the detected charger * is actually a dedicated charger, the following steps need to be taken. 
*/ static inline int isp1704_charger_verify(struct isp1704_charger *isp) { int ret = 0; u8 r; /* Reset the transceiver */ r = otg_io_read(isp->otg, ULPI_FUNC_CTRL); r |= ULPI_FUNC_CTRL_RESET; otg_io_write(isp->otg, ULPI_FUNC_CTRL, r); usleep_range(1000, 2000); /* Set normal mode */ r &= ~(ULPI_FUNC_CTRL_RESET | ULPI_FUNC_CTRL_OPMODE_MASK); otg_io_write(isp->otg, ULPI_FUNC_CTRL, r); /* Clear the DP and DM pull-down bits */ r = ULPI_OTG_CTRL_DP_PULLDOWN | ULPI_OTG_CTRL_DM_PULLDOWN; otg_io_write(isp->otg, ULPI_CLR(ULPI_OTG_CTRL), r); /* Enable strong pull-up on DP (1.5K) and reset */ r = ULPI_FUNC_CTRL_TERMSELECT | ULPI_FUNC_CTRL_RESET; otg_io_write(isp->otg, ULPI_SET(ULPI_FUNC_CTRL), r); usleep_range(1000, 2000); /* Read the line state */ if (!otg_io_read(isp->otg, ULPI_DEBUG)) { /* Disable strong pull-up on DP (1.5K) */ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL), ULPI_FUNC_CTRL_TERMSELECT); return 1; } /* Is it a charger or PS/2 connection */ /* Enable weak pull-up resistor on DP */ otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), ISP1704_PWR_CTRL_DP_WKPU_EN); /* Disable strong pull-up on DP (1.5K) */ otg_io_write(isp->otg, ULPI_CLR(ULPI_FUNC_CTRL), ULPI_FUNC_CTRL_TERMSELECT); /* Enable weak pull-down resistor on DM */ otg_io_write(isp->otg, ULPI_SET(ULPI_OTG_CTRL), ULPI_OTG_CTRL_DM_PULLDOWN); /* It's a charger if the line states are clear */ if (!(otg_io_read(isp->otg, ULPI_DEBUG))) ret = 1; /* Disable weak pull-up resistor on DP */ otg_io_write(isp->otg, ULPI_CLR(ISP1704_PWR_CTRL), ISP1704_PWR_CTRL_DP_WKPU_EN); return ret; } static inline int isp1704_charger_detect(struct isp1704_charger *isp) { unsigned long timeout; u8 pwr_ctrl; int ret = 0; pwr_ctrl = otg_io_read(isp->otg, ISP1704_PWR_CTRL); /* set SW control bit in PWR_CTRL register */ otg_io_write(isp->otg, ISP1704_PWR_CTRL, ISP1704_PWR_CTRL_SWCTRL); /* enable manual charger detection */ otg_io_write(isp->otg, ULPI_SET(ISP1704_PWR_CTRL), ISP1704_PWR_CTRL_SWCTRL | ISP1704_PWR_CTRL_DPVSRC_EN); 
usleep_range(1000, 2000); timeout = jiffies + msecs_to_jiffies(300); do { /* Check if there is a charger */ if (otg_io_read(isp->otg, ISP1704_PWR_CTRL) & ISP1704_PWR_CTRL_VDAT_DET) { ret = isp1704_charger_verify(isp); break; } } while (!time_after(jiffies, timeout) && isp->online); /* recover original state */ otg_io_write(isp->otg, ISP1704_PWR_CTRL, pwr_ctrl); return ret; } static void isp1704_charger_work(struct work_struct *data) { int detect; unsigned long event; unsigned power; struct isp1704_charger *isp = container_of(data, struct isp1704_charger, work); static DEFINE_MUTEX(lock); event = isp->event; power = isp->max_power; mutex_lock(&lock); if (event != USB_EVENT_NONE) isp1704_charger_set_power(isp, 1); switch (event) { case USB_EVENT_VBUS: isp->online = true; /* detect charger */ detect = isp1704_charger_detect(isp); if (detect) { isp->present = detect; isp->psy.type = isp1704_charger_type(isp); } switch (isp->psy.type) { case POWER_SUPPLY_TYPE_USB_DCP: isp->current_max = 1800; break; case POWER_SUPPLY_TYPE_USB_CDP: /* * Only 500mA here or high speed chirp * handshaking may break */ isp->current_max = 500; /* FALLTHROUGH */ case POWER_SUPPLY_TYPE_USB: default: /* enable data pullups */ if (isp->otg->gadget) usb_gadget_connect(isp->otg->gadget); } break; case USB_EVENT_NONE: isp->online = false; isp->current_max = 0; isp->present = 0; isp->current_max = 0; isp->psy.type = POWER_SUPPLY_TYPE_USB; /* * Disable data pullups. We need to prevent the controller from * enumerating. * * FIXME: This is here to allow charger detection with Host/HUB * chargers. The pullups may be enabled elsewhere, so this can * not be the final solution. 
*/ if (isp->otg->gadget) usb_gadget_disconnect(isp->otg->gadget); isp1704_charger_set_power(isp, 0); break; case USB_EVENT_ENUMERATED: if (isp->present) isp->current_max = 1800; else isp->current_max = power; break; default: goto out; } power_supply_changed(&isp->psy); out: mutex_unlock(&lock); } static int isp1704_notifier_call(struct notifier_block *nb, unsigned long event, void *power) { struct isp1704_charger *isp = container_of(nb, struct isp1704_charger, nb); isp->event = event; if (power) isp->max_power = *((unsigned *)power); schedule_work(&isp->work); return NOTIFY_OK; } static int isp1704_charger_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct isp1704_charger *isp = container_of(psy, struct isp1704_charger, psy); switch (psp) { case POWER_SUPPLY_PROP_PRESENT: val->intval = isp->present; break; case POWER_SUPPLY_PROP_ONLINE: val->intval = isp->online; break; case POWER_SUPPLY_PROP_CURRENT_MAX: val->intval = isp->current_max; break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = isp->model; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = "NXP"; break; default: return -EINVAL; } return 0; } static enum power_supply_property power_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, }; static inline int isp1704_test_ulpi(struct isp1704_charger *isp) { int vendor; int product; int i; int ret = -ENODEV; /* Test ULPI interface */ ret = otg_io_write(isp->otg, ULPI_SCRATCH, 0xaa); if (ret < 0) return ret; ret = otg_io_read(isp->otg, ULPI_SCRATCH); if (ret < 0) return ret; if (ret != 0xaa) return -ENODEV; /* Verify the product and vendor id matches */ vendor = otg_io_read(isp->otg, ULPI_VENDOR_ID_LOW); vendor |= otg_io_read(isp->otg, ULPI_VENDOR_ID_HIGH) << 8; if (vendor != NXP_VENDOR_ID) return -ENODEV; product = otg_io_read(isp->otg, ULPI_PRODUCT_ID_LOW); product |= 
otg_io_read(isp->otg, ULPI_PRODUCT_ID_HIGH) << 8; for (i = 0; i < ARRAY_SIZE(isp170x_id); i++) { if (product == isp170x_id[i]) { sprintf(isp->model, "isp%x", product); return product; } } dev_err(isp->dev, "product id %x not matching known ids", product); return -ENODEV; } static int __devinit isp1704_charger_probe(struct platform_device *pdev) { struct isp1704_charger *isp; int ret = -ENODEV; isp = kzalloc(sizeof *isp, GFP_KERNEL); if (!isp) return -ENOMEM; isp->otg = otg_get_transceiver(); if (!isp->otg) goto fail0; isp->dev = &pdev->dev; platform_set_drvdata(pdev, isp); isp1704_charger_set_power(isp, 1); ret = isp1704_test_ulpi(isp); if (ret < 0) goto fail1; isp->psy.name = "isp1704"; isp->psy.type = POWER_SUPPLY_TYPE_USB; isp->psy.properties = power_props; isp->psy.num_properties = ARRAY_SIZE(power_props); isp->psy.get_property = isp1704_charger_get_property; ret = power_supply_register(isp->dev, &isp->psy); if (ret) goto fail1; /* * REVISIT: using work in order to allow the otg notifications to be * made atomically in the future. */ INIT_WORK(&isp->work, isp1704_charger_work); isp->nb.notifier_call = isp1704_notifier_call; ret = otg_register_notifier(isp->otg, &isp->nb); if (ret) goto fail2; dev_info(isp->dev, "registered with product id %s\n", isp->model); /* * Taking over the D+ pullup. * * FIXME: The device will be disconnected if it was already * enumerated. The charger driver should be always loaded before any * gadget is loaded. */ if (isp->otg->gadget) usb_gadget_disconnect(isp->otg->gadget); /* Detect charger if VBUS is valid (the cable was already plugged). 
*/ ret = otg_io_read(isp->otg, ULPI_USB_INT_STS); isp1704_charger_set_power(isp, 0); if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) { isp->event = USB_EVENT_VBUS; schedule_work(&isp->work); } return 0; fail2: power_supply_unregister(&isp->psy); fail1: otg_put_transceiver(isp->otg); fail0: kfree(isp); dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret); return ret; } static int __devexit isp1704_charger_remove(struct platform_device *pdev) { struct isp1704_charger *isp = platform_get_drvdata(pdev); otg_unregister_notifier(isp->otg, &isp->nb); power_supply_unregister(&isp->psy); otg_put_transceiver(isp->otg); isp1704_charger_set_power(isp, 0); kfree(isp); return 0; } static struct platform_driver isp1704_charger_driver = { .driver = { .name = "isp1704_charger", }, .probe = isp1704_charger_probe, .remove = __devexit_p(isp1704_charger_remove), }; static int __init isp1704_charger_init(void) { return platform_driver_register(&isp1704_charger_driver); } module_init(isp1704_charger_init); static void __exit isp1704_charger_exit(void) { platform_driver_unregister(&isp1704_charger_driver); } module_exit(isp1704_charger_exit); MODULE_ALIAS("platform:isp1704_charger"); MODULE_AUTHOR("Nokia Corporation"); MODULE_DESCRIPTION("ISP170x USB Charger driver"); MODULE_LICENSE("GPL");
gpl-2.0
Team-Blackout/Blackout_Jewel_plus
drivers/block/mtip32xx/mtip32xx.c
4483
101409
/* * Driver for the Micron P320 SSD * Copyright (C) 2011 Micron Technology, Inc. * * Portions of this code were derived from works subjected to the * following copyright: * Copyright (C) 2009 Integrated Device Technology, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/ata.h> #include <linux/delay.h> #include <linux/hdreg.h> #include <linux/uaccess.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/compat.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/genhd.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/kthread.h> #include <../drivers/ata/ahci.h> #include <linux/export.h> #include "mtip32xx.h" #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) #define HW_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16)) #define HW_CMD_TBL_AR_SZ (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS) #define HW_PORT_PRIV_DMA_SZ \ (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ) #define HOST_CAP_NZDMA (1 << 19) #define HOST_HSORG 0xFC #define HSORG_DISABLE_SLOTGRP_INTR (1<<24) #define HSORG_DISABLE_SLOTGRP_PXIS (1<<16) #define HSORG_HWREV 0xFF00 #define HSORG_STYLE 0x8 #define HSORG_SLOTGROUPS 0x7 #define PORT_COMMAND_ISSUE 0x38 #define PORT_SDBV 0x7C #define PORT_OFFSET 0x100 #define PORT_MEM_SIZE 0x80 #define PORT_IRQ_ERR \ (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \ PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \ PORT_IRQ_TF_ERR | 
PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \ PORT_IRQ_OVERFLOW) #define PORT_IRQ_LEGACY \ (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS) #define PORT_IRQ_HANDLED \ (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \ PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \ PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY) #define DEF_PORT_IRQ \ (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS) /* product numbers */ #define MTIP_PRODUCT_UNKNOWN 0x00 #define MTIP_PRODUCT_ASICFPGA 0x11 /* Device instance number, incremented each time a device is probed. */ static int instance; /* * Global variable used to hold the major block device number * allocated in mtip_init(). */ static int mtip_major; static DEFINE_SPINLOCK(rssd_index_lock); static DEFINE_IDA(rssd_index_ida); static int mtip_block_initialize(struct driver_data *dd); #ifdef CONFIG_COMPAT struct mtip_compat_ide_task_request_s { __u8 io_ports[8]; __u8 hob_ports[8]; ide_reg_valid_t out_flags; ide_reg_valid_t in_flags; int data_phase; int req_cmd; compat_ulong_t out_size; compat_ulong_t in_size; }; #endif /* * This function check_for_surprise_removal is called * while card is removed from the system and it will * read the vendor id from the configration space * * @pdev Pointer to the pci_dev structure. * * return value * true if device removed, else false */ static bool mtip_check_surprise_removal(struct pci_dev *pdev) { u16 vendor_id = 0; /* Read the vendorID from the configuration space */ pci_read_config_word(pdev, 0x00, &vendor_id); if (vendor_id == 0xFFFF) return true; /* device removed */ return false; /* device present */ } /* * This function is called for clean the pending command in the * command slot during the surprise removal of device and return * error to the upper layer. * * @dd Pointer to the DRIVER_DATA structure. 
 *
 * return value
 *	None
 */
static void mtip_command_cleanup(struct driver_data *dd)
{
	int group = 0, commandslot = 0, commandindex = 0;
	struct mtip_cmd *command;
	struct mtip_port *port = dd->port;
	/*
	 * NOTE(review): this re-entrancy guard is a plain static int,
	 * not atomic — presumably callers cannot race on SMP here;
	 * confirm before relying on it.
	 */
	static int in_progress;

	if (in_progress)
		return;

	in_progress = 1;

	for (group = 0; group < 4; group++) {
		for (commandslot = 0; commandslot < 32; commandslot++) {
			if (!(port->allocated[group] & (1 << commandslot)))
				continue;

			commandindex = group << 5 | commandslot;
			command = &port->commands[commandindex];

			/* Fail the command back to the upper layer. */
			if (atomic_read(&command->active)
			    && (command->async_callback)) {
				command->async_callback(command->async_data,
					-ENODEV);
				command->async_callback = NULL;
				command->async_data = NULL;
			}

			dma_unmap_sg(&port->dd->pdev->dev,
				command->sg,
				command->scatter_ents,
				command->direction);
		}
	}

	up(&port->cmd_slot);

	set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
	in_progress = 0;
}

/*
 * Obtain an empty command slot.
 *
 * This function needs to be reentrant since it could be called
 * at the same time on multiple CPUs. The allocation of the
 * command slot must be atomic.
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	>= 0	Index of command slot obtained.
 *	-1	No command slots available.
 */
static int get_slot(struct mtip_port *port)
{
	int slot, i;
	unsigned int num_command_slots = port->dd->slot_groups * 32;

	/*
	 * Try 10 times, because there is a small race here.
	 *  that's ok, because it's still cheaper than a lock.
	 *
	 * Race: Since this section is not protected by lock, same bit
	 * could be chosen by different process contexts running in
	 * different processor. So instead of costly lock, we are going
	 * with loop.
	 */
	for (i = 0; i < 10; i++) {
		/* Start from bit 1: bit 0 is the internal command slot. */
		slot = find_next_zero_bit(port->allocated,
					 num_command_slots, 1);
		if ((slot < num_command_slots) &&
		    (!test_and_set_bit(slot, port->allocated)))
			return slot;
	}
	dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");

	if (mtip_check_surprise_removal(port->dd->pdev)) {
		/* Device not present, clean outstanding commands */
		mtip_command_cleanup(port->dd);
	}
	return -1;
}

/*
 * Release a command slot.
 *
 * @port Pointer to the port data structure.
 * @tag  Tag of command to release
 *
 * return value
 *	None
 */
static inline void release_slot(struct mtip_port *port, int tag)
{
	smp_mb__before_clear_bit();
	clear_bit(tag, port->allocated);
	smp_mb__after_clear_bit();
}

/*
 * Reset the HBA (without sleeping)
 *
 * Just like hba_reset, except does not call sleep, so can be
 * run from interrupt/tasklet context.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	The reset was successful.
 *	-1	The HBA Reset bit did not clear.
 */
static int hba_reset_nosleep(struct driver_data *dd)
{
	unsigned long timeout;

	/* Chip quirk: quiesce any chip function */
	mdelay(10);

	/* Set the reset bit */
	writel(HOST_RESET, dd->mmio + HOST_CTL);

	/* Flush */
	readl(dd->mmio + HOST_CTL);

	/*
	 * Wait 10ms then spin for up to 1 second
	 * waiting for reset acknowledgement
	 */
	timeout = jiffies + msecs_to_jiffies(1000);
	mdelay(10);
	while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
		 && time_before(jiffies, timeout))
		mdelay(1);

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
		return -1;

	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
		return -1;

	return 0;
}

/*
 * Issue a command to the hardware.
 *
 * Set the appropriate bit in the s_active and Command Issue hardware
 * registers, causing hardware command processing to begin.
 *
 * @port Pointer to the port structure.
 * @tag  The tag of the command to be issued.
 *
 * return value
 *	None
 */
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
	unsigned long flags = 0;

	atomic_set(&port->commands[tag].active, 1);

	spin_lock_irqsave(&port->cmd_issue_lock, flags);

	/* s_active must be written before the Command Issue register. */
	writel((1 << MTIP_TAG_BIT(tag)),
			port->s_active[MTIP_TAG_INDEX(tag)]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->cmd_issue[MTIP_TAG_INDEX(tag)]);

	spin_unlock_irqrestore(&port->cmd_issue_lock, flags);

	/* Set the command's timeout value.*/
	port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
					MTIP_NCQ_COMMAND_TIMEOUT_MS);
}

/*
 * Enable/disable the reception of FIS
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled
 */
static int mtip_enable_fis(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable FIS reception */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);

	/* Flush */
	readl(port->mmio + PORT_CMD);

	return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
}

/*
 * Enable/disable the DMA engine
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled.
 */
static int mtip_enable_engine(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* start/stop the DMA engine via PxCMD.ST */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);

	readl(port->mmio + PORT_CMD);
	return (((tmp & PORT_CMD_START) == PORT_CMD_START));
}

/*
 * Enables the port DMA engine and FIS reception.
 *
 * return value
 *	None
 */
static inline void mtip_start_port(struct mtip_port *port)
{
	/* Enable FIS reception */
	mtip_enable_fis(port, 1);

	/* Enable the DMA engine */
	mtip_enable_engine(port, 1);
}

/*
 * Deinitialize a port by disabling port interrupts, the DMA engine,
 * and FIS reception.
* * @port Pointer to the port structure * * return value * None */ static inline void mtip_deinit_port(struct mtip_port *port) { /* Disable interrupts on this port */ writel(0, port->mmio + PORT_IRQ_MASK); /* Disable the DMA engine */ mtip_enable_engine(port, 0); /* Disable FIS reception */ mtip_enable_fis(port, 0); } /* * Initialize a port. * * This function deinitializes the port by calling mtip_deinit_port() and * then initializes it by setting the command header and RX FIS addresses, * clearing the SError register and any pending port interrupts before * re-enabling the default set of port interrupts. * * @port Pointer to the port structure. * * return value * None */ static void mtip_init_port(struct mtip_port *port) { int i; mtip_deinit_port(port); /* Program the command list base and FIS base addresses */ if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) { writel((port->command_list_dma >> 16) >> 16, port->mmio + PORT_LST_ADDR_HI); writel((port->rxfis_dma >> 16) >> 16, port->mmio + PORT_FIS_ADDR_HI); } writel(port->command_list_dma & 0xFFFFFFFF, port->mmio + PORT_LST_ADDR); writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR); /* Clear SError */ writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR); /* reset the completed registers.*/ for (i = 0; i < port->dd->slot_groups; i++) writel(0xFFFFFFFF, port->completed[i]); /* Clear any pending interrupts for this port */ writel(readl(port->dd->mmio + PORT_IRQ_STAT), port->dd->mmio + PORT_IRQ_STAT); /* Clear any pending interrupts on the HBA. */ writel(readl(port->dd->mmio + HOST_IRQ_STAT), port->dd->mmio + HOST_IRQ_STAT); /* Enable port interrupts */ writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK); } /* * Restart a port * * @port Pointer to the port data structure. 
* * return value * None */ static void mtip_restart_port(struct mtip_port *port) { unsigned long timeout; /* Disable the DMA engine */ mtip_enable_engine(port, 0); /* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */ timeout = jiffies + msecs_to_jiffies(500); while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) && time_before(jiffies, timeout)) ; if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) return; /* * Chip quirk: escalate to hba reset if * PxCMD.CR not clear after 500 ms */ if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) { dev_warn(&port->dd->pdev->dev, "PxCMD.CR not clear, escalating reset\n"); if (hba_reset_nosleep(port->dd)) dev_err(&port->dd->pdev->dev, "HBA reset escalation failed.\n"); /* 30 ms delay before com reset to quiesce chip */ mdelay(30); } dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n"); /* Set PxSCTL.DET */ writel(readl(port->mmio + PORT_SCR_CTL) | 1, port->mmio + PORT_SCR_CTL); readl(port->mmio + PORT_SCR_CTL); /* Wait 1 ms to quiesce chip function */ timeout = jiffies + msecs_to_jiffies(1); while (time_before(jiffies, timeout)) ; if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) return; /* Clear PxSCTL.DET */ writel(readl(port->mmio + PORT_SCR_CTL) & ~1, port->mmio + PORT_SCR_CTL); readl(port->mmio + PORT_SCR_CTL); /* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */ timeout = jiffies + msecs_to_jiffies(500); while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0) && time_before(jiffies, timeout)) ; if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) return; if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0) dev_warn(&port->dd->pdev->dev, "COM reset failed\n"); mtip_init_port(port); mtip_start_port(port); } /* * Helper function for tag logging */ static void print_tags(struct driver_data *dd, char *msg, unsigned long *tagbits, int cnt) { unsigned char tagmap[128]; int group, tagmap_len = 0; memset(tagmap, 0, sizeof(tagmap)); for (group = SLOTBITS_IN_LONGS; group > 0; group--) 
tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ", tagbits[group-1]); dev_warn(&dd->pdev->dev, "%d command(s) %s: tagmap [%s]", cnt, msg, tagmap); } /* * Called periodically to see if any read/write commands are * taking too long to complete. * * @data Pointer to the PORT data structure. * * return value * None */ static void mtip_timeout_function(unsigned long int data) { struct mtip_port *port = (struct mtip_port *) data; struct host_to_dev_fis *fis; struct mtip_cmd *command; int tag, cmdto_cnt = 0; unsigned int bit, group; unsigned int num_command_slots = port->dd->slot_groups * 32; unsigned long to, tagaccum[SLOTBITS_IN_LONGS]; if (unlikely(!port)) return; if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) { mod_timer(&port->cmd_timer, jiffies + msecs_to_jiffies(30000)); return; } /* clear the tag accumulator */ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); for (tag = 0; tag < num_command_slots; tag++) { /* * Skip internal command slot as it has * its own timeout mechanism */ if (tag == MTIP_TAG_INTERNAL) continue; if (atomic_read(&port->commands[tag].active) && (time_after(jiffies, port->commands[tag].comp_time))) { group = tag >> 5; bit = tag & 0x1F; command = &port->commands[tag]; fis = (struct host_to_dev_fis *) command->command; set_bit(tag, tagaccum); cmdto_cnt++; if (cmdto_cnt == 1) set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); /* * Clear the completed bit. This should prevent * any interrupt handlers from trying to retire * the command. */ writel(1 << bit, port->completed[group]); /* Call the async completion callback. */ if (likely(command->async_callback)) command->async_callback(command->async_data, -EIO); command->async_callback = NULL; command->comp_func = NULL; /* Unmap the DMA scatter list entries */ dma_unmap_sg(&port->dd->pdev->dev, command->sg, command->scatter_ents, command->direction); /* * Clear the allocated bit and active tag for the * command. 
			 */
			atomic_set(&port->commands[tag].active, 0);
			release_slot(port, tag);

			up(&port->cmd_slot);
		}
	}

	if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
		print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);

		mtip_restart_port(port);
		clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
		wake_up_interruptible(&port->svc_wait);
	}

	if (port->ic_pause_timer) {
		/* Internal-command pause window expires after 1000 ms. */
		to = port->ic_pause_timer + msecs_to_jiffies(1000);
		if (time_after(jiffies, to)) {
			if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
				port->ic_pause_timer = 0;
				clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
				clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
				wake_up_interruptible(&port->svc_wait);
			}
		}
	}

	/* Restart the timer */
	mod_timer(&port->cmd_timer,
		jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
}

/*
 * IO completion function.
 *
 * This completion function is called by the driver ISR when a
 * command that was issued by the kernel completes. It first calls the
 * asynchronous completion function which normally calls back into the block
 * layer passing the asynchronous callback data, then unmaps the
 * scatter list associated with the completed command, and finally
 * clears the allocated bit associated with the completed command.
 *
 * @port   Pointer to the port data structure.
 * @tag    Tag of the command.
 * @data   Pointer to driver_data.
 * @status Completion status.
 *
 * return value
 *	None
 */
static void mtip_async_complete(struct mtip_port *port,
				int tag,
				void *data,
				int status)
{
	struct mtip_cmd *command;
	struct driver_data *dd = data;
	/* Any non-zero hardware status is reported to the block layer as -EIO. */
	int cb_status = status ? -EIO : 0;

	if (unlikely(!dd) || unlikely(!port))
		return;

	command = &port->commands[tag];

	if (unlikely(status == PORT_IRQ_TF_ERR)) {
		dev_warn(&port->dd->pdev->dev,
			"Command tag %d failed due to TFE\n", tag);
	}

	/* Upper layer callback */
	if (likely(command->async_callback))
		command->async_callback(command->async_data, cb_status);

	command->async_callback = NULL;
	command->comp_func = NULL;

	/* Unmap the DMA scatter list entries */
	dma_unmap_sg(&dd->pdev->dev,
		command->sg,
		command->scatter_ents,
		command->direction);

	/* Clear the allocated and active bits for the command */
	atomic_set(&port->commands[tag].active, 0);
	release_slot(port, tag);

	up(&port->cmd_slot);
}

/*
 * Internal command completion callback function.
 *
 * This function is normally called by the driver ISR when an internal
 * command completed. This function signals the command completion by
 * calling complete().
 *
 * @port   Pointer to the port data structure.
 * @tag    Tag of the command that has completed.
 * @data   Pointer to a completion structure.
 * @status Completion status.
 *
 * return value
 *	None
 */
static void mtip_completion(struct mtip_port *port,
			    int tag,
			    void *data,
			    int status)
{
	struct mtip_cmd *command = &port->commands[tag];
	struct completion *waiting = data;
	if (unlikely(status == PORT_IRQ_TF_ERR))
		dev_warn(&port->dd->pdev->dev,
			"Internal command %d completed with TFE\n", tag);

	command->async_callback = NULL;
	command->comp_func = NULL;

	complete(waiting);
}

/* No-op completion used when the caller polls for completion instead. */
static void mtip_null_completion(struct mtip_port *port,
			    int tag,
			    void *data,
			    int status)
{
	return;
}

static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
						struct smart_attr *attrib);
/*
 * Handle an error.
 *
 * @dd Pointer to the DRIVER_DATA structure.
 *
 * return value
 *	None
 */
static void mtip_handle_tfe(struct driver_data *dd)
{
	int group, tag, bit, reissue, rv;
	struct mtip_port *port;
	struct mtip_cmd *cmd;
	u32 completed;
	struct host_to_dev_fis *fis;
	unsigned long tagaccum[SLOTBITS_IN_LONGS];
	unsigned int cmd_cnt = 0;
	unsigned char *buf;
	char *fail_reason = NULL;
	int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;

	dev_warn(&dd->pdev->dev, "Taskfile error\n");

	port = dd->port;

	/* Stop the timer to prevent command timeouts. */
	del_timer(&port->cmd_timer);

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Set eh_active */
	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		completed = readl(port->completed[group]);

		/* clear completed status register in the hardware.*/
		writel(completed, port->completed[group]);

		/* Process successfully completed commands */
		for (bit = 0; bit < 32 && completed; bit++) {
			if (!(completed & (1<<bit)))
				continue;
			tag = (group << 5) + bit;

			/* Skip the internal command slot */
			if (tag == MTIP_TAG_INTERNAL)
				continue;

			cmd = &port->commands[tag];
			if (likely(cmd->comp_func)) {
				set_bit(tag, tagaccum);
				cmd_cnt++;
				atomic_set(&cmd->active, 0);
				cmd->comp_func(port,
					 tag,
					 cmd->comp_data,
					 0);
			} else {
				dev_err(&port->dd->pdev->dev,
					"Missing completion func for tag %d",
					tag);
				if (mtip_check_surprise_removal(dd->pdev)) {
					mtip_command_cleanup(dd);
					/* don't proceed further */
					return;
				}
			}
		}
	}
	print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);

	/* Restart the port */
	mdelay(20);
	mtip_restart_port(port);

	/* Trying to determine the cause of the error */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		/*
		 * NOTE(review): byte offsets 259/288 and the 0xF7/0xBF codes
		 * are vendor-specific fields of the NCQ error log page —
		 * presumably from Micron documentation; not derivable here.
		 */
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
			fail_all_ncq_write = 1;
			fail_reason = "write protect";
		}
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
			fail_all_ncq_cmds = 1;
			fail_reason = "thermal shutdown";
		}
		if (buf[288] == 0xBF) {
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed.\n");
			fail_all_ncq_cmds = 1;
			fail_reason = "rebuild failed";
		}
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		for (bit = 0; bit < 32; bit++) {
			reissue = 1;
			tag = (group << 5) + bit;
			cmd = &port->commands[tag];

			/* If the active bit is set re-issue the command */
			if (atomic_read(&cmd->active) == 0)
				continue;

			fis = (struct host_to_dev_fis *)cmd->command;

			/* Should re-issue? */
			if (tag == MTIP_TAG_INTERNAL ||
			    fis->command == ATA_CMD_SET_FEATURES)
				reissue = 0;
			else {
				if (fail_all_ncq_cmds ||
					(fail_all_ncq_write &&
					fis->command == ATA_CMD_FPDMA_WRITE)) {
					dev_warn(&dd->pdev->dev,
					" Fail: %s w/tag %d [%s].\n",
					fis->command == ATA_CMD_FPDMA_WRITE ?
						"write" : "read",
					tag,
					fail_reason != NULL ?
						fail_reason : "unknown");
					atomic_set(&cmd->active, 0);
					if (cmd->comp_func) {
						cmd->comp_func(port, tag,
							cmd->comp_data,
							-ENODATA);
					}
					continue;
				}
			}

			/*
			 * First check if this command has
			 *  exceeded its retries.
			 */
			if (reissue && (cmd->retries-- > 0)) {

				set_bit(tag, tagaccum);

				/* Re-issue the command.
				 */
				mtip_issue_ncq_command(port, tag);

				continue;
			}

			/* Retire a command that will not be reissued */
			dev_warn(&port->dd->pdev->dev,
				"retiring tag %d\n", tag);
			atomic_set(&cmd->active, 0);

			if (cmd->comp_func)
				cmd->comp_func(
					port,
					tag,
					cmd->comp_data,
					PORT_IRQ_TF_ERR);
			else
				dev_warn(&port->dd->pdev->dev,
					"Bad completion for tag %d\n",
					tag);
		}
	}
	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);

	/* clear eh_active */
	clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
	wake_up_interruptible(&port->svc_wait);

	mod_timer(&port->cmd_timer,
		 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
}

/*
 * Handle a set device bits interrupt
 */
static inline void mtip_process_sdbf(struct driver_data *dd)
{
	struct mtip_port *port = dd->port;
	int group, tag, bit;
	u32 completed;
	struct mtip_cmd *command;

	/* walk all bits in all slot groups */
	for (group = 0; group < dd->slot_groups; group++) {
		completed = readl(port->completed[group]);

		/* clear completed status register in the hardware.*/
		writel(completed, port->completed[group]);

		/* Process completed commands. */
		for (bit = 0;
		     (bit < 32) && completed;
		     bit++, completed >>= 1) {
			if (completed & 0x01) {
				tag = (group << 5) | bit;

				/* skip internal command slot.
				 */
				if (unlikely(tag == MTIP_TAG_INTERNAL))
					continue;

				command = &port->commands[tag];
				/* make internal callback */
				if (likely(command->comp_func)) {
					command->comp_func(
						port,
						tag,
						command->comp_data,
						0);
				} else {
					dev_warn(&dd->pdev->dev,
						"Null completion "
						"for tag %d",
						tag);

					if (mtip_check_surprise_removal(
						dd->pdev)) {
						mtip_command_cleanup(dd);
						return;
					}
				}
			}
		}
	}
}

/*
 * Process legacy pio and d2h interrupts
 */
static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
{
	struct mtip_port *port = dd->port;
	struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];

	/* Retire the internal command once its CI bit has cleared. */
	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
	    (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
		& (1 << MTIP_TAG_INTERNAL))) {
		if (cmd->comp_func) {
			cmd->comp_func(port,
				MTIP_TAG_INTERNAL,
				cmd->comp_data,
				0);
			return;
		}
	}

	return;
}

/*
 * Demux and handle errors
 */
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
	if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR)))
		mtip_handle_tfe(dd);

	if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.x\n");
		writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.n\n");
		writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
		dev_warn(&dd->pdev->dev,
			"Port stat errors %x unhandled\n",
			(port_stat & ~PORT_IRQ_HANDLED));
	}
}

/*
 * Main interrupt demux: reads the HBA status, acknowledges the port
 * status, and dispatches SDB FIS completions, errors, and legacy
 * (PIO/D2H) completions. Called from tasklet context.
 */
static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
{
	struct driver_data *dd = (struct driver_data *) data;
	struct mtip_port *port = dd->port;
	u32 hba_stat, port_stat;
	int rv = IRQ_NONE;

	hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
	if (hba_stat) {
		rv = IRQ_HANDLED;

		/* Acknowledge the interrupt status on the port.*/
		port_stat = readl(port->mmio + PORT_IRQ_STAT);
		writel(port_stat, port->mmio + PORT_IRQ_STAT);

		/* Demux port status */
		if (likely(port_stat & PORT_IRQ_SDB_FIS))
			mtip_process_sdbf(dd);

		if (unlikely(port_stat & PORT_IRQ_ERR)) {
			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
				mtip_command_cleanup(dd);
				/* don't proceed further */
				return IRQ_HANDLED;
			}
			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
							&dd->dd_flag))
				return rv;

			mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
		}

		if (unlikely(port_stat & PORT_IRQ_LEGACY))
			mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
	}

	/* acknowledge interrupt */
	writel(hba_stat, dd->mmio + HOST_IRQ_STAT);

	return rv;
}

/*
 * Wrapper for mtip_handle_irq
 * (ignores return code)
 */
static void mtip_tasklet(unsigned long data)
{
	mtip_handle_irq((struct driver_data *) data);
}

/*
 * HBA interrupt subroutine.
 *
 * Defers all real work to the tasklet; the hard IRQ handler only
 * schedules it.
 *
 * @irq      IRQ number.
 * @instance Pointer to the driver data structure.
 *
 * return value
 *	IRQ_HANDLED	A HBA interrupt was pending and handled.
 *	IRQ_NONE	This interrupt was not for the HBA.
 */
static irqreturn_t mtip_irq_handler(int irq, void *instance)
{
	struct driver_data *dd = instance;
	tasklet_schedule(&dd->tasklet);
	return IRQ_HANDLED;
}

static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
	atomic_set(&port->commands[tag].active, 1);
	writel(1 << MTIP_TAG_BIT(tag),
		port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}

/*
 * Decide whether NCQ must stay paused after an internal command
 * (secure erase / microcode download / lowlevel format), and issue a
 * COM reset where the drive requires one.
 */
static bool mtip_pause_ncq(struct mtip_port *port,
				struct host_to_dev_fis *fis)
{
	struct host_to_dev_fis *reply;
	unsigned long task_file_data;

	reply = port->rxfis + RX_FIS_D2H_REG;
	task_file_data = readl(port->mmio+PORT_TFDATA);

	if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT))
		return false;

	if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
					(fis->features == 0x03)) {
		set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
		((fis->command == 0xFC) &&
			(fis->features == 0x27 || fis->features == 0x72 ||
			 fis->features == 0x62 || fis->features == 0x26))) {
		/* Com reset after secure erase or lowlevel format */
		mtip_restart_port(port);
		return false;
	}

	return false;
}

/*
 * Wait for port to quiesce
 *
 * @port    Pointer to port data structure
 * @timeout Max duration to wait (ms)
 *
 * return value
 *	0	Success
 *	-EBUSY	Commands still active
 */
static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
{
	unsigned long to;
	unsigned int n;
	unsigned int active = 1;

	to = jiffies + msecs_to_jiffies(timeout);
	do {
		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
			msleep(20);
			continue; /* svc thd is actively issuing commands */
		}
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
			return -EFAULT;
		/*
		 * Ignore s_active bit 0 of array element 0.
		 * This bit will always be set
		 */
		active = readl(port->s_active[0]) & 0xFFFFFFFE;
		for (n = 1; n < port->dd->slot_groups; n++)
			active |= readl(port->s_active[n]);

		if (!active)
			break;

		msleep(20);
	} while (time_before(jiffies, to));

	return active ? -EBUSY : 0;
}

/*
 * Execute an internal command and wait for the completion.
 *
 * @port    Pointer to the port data structure.
 * @fis     Pointer to the FIS that describes the command.
 * @fis_len Length in WORDS of the FIS.
 * @buffer  DMA accessible for command data.
 * @buf_len Length, in bytes, of the data buffer.
 * @opts    Command header options, excluding the FIS length
 *          and the number of PRD entries.
 * @timeout Time in ms to wait for the command to complete.
 *
 * return value
 *	0	 Command completed successfully.
 *	-EFAULT  The buffer address is not correctly aligned.
 *	-EBUSY   Internal command or other IO in progress.
 *	-EAGAIN  Time out waiting for command to complete.
 */
static int mtip_exec_internal_command(struct mtip_port *port,
					struct host_to_dev_fis *fis,
					int fis_len,
					dma_addr_t buffer,
					int buf_len,
					u32 opts,
					gfp_t atomic,
					unsigned long timeout)
{
	struct mtip_cmd_sg *command_sg;
	DECLARE_COMPLETION_ONSTACK(wait);
	int rv = 0, ready2go = 1;
	struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
	unsigned long to;

	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
	if (buffer & 0x00000007) {
		dev_err(&port->dd->pdev->dev,
			"SG buffer is not 8 byte aligned\n");
		return -EFAULT;
	}

	/* Claim the dedicated internal-command slot, retrying up to @timeout. */
	to = jiffies + msecs_to_jiffies(timeout);
	do {
		ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL,
						port->allocated);
		if (ready2go)
			break;
		mdelay(100);
	} while (time_before(jiffies, to));
	if (!ready2go) {
		dev_warn(&port->dd->pdev->dev,
			"Internal cmd active. new cmd [%02X]\n", fis->command);
		return -EBUSY;
	}
	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	port->ic_pause_timer = 0;

	if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
	else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
		clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);

	if (atomic == GFP_KERNEL) {
		if (fis->command != ATA_CMD_STANDBYNOW1) {
			/* wait for io to complete if non atomic */
			if (mtip_quiesce_io(port, 5000) < 0) {
				dev_warn(&port->dd->pdev->dev,
					"Failed to quiesce IO\n");
				release_slot(port, MTIP_TAG_INTERNAL);
				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
				wake_up_interruptible(&port->svc_wait);
				return -EBUSY;
			}
		}

		/* Set the completion function and data for the command. */
		int_cmd->comp_data = &wait;
		int_cmd->comp_func = mtip_completion;

	} else {
		/* Clear completion - we're going to poll */
		int_cmd->comp_data = NULL;
		int_cmd->comp_func = mtip_null_completion;
	}

	/* Copy the command to the command table */
	memcpy(int_cmd->command, fis, fis_len*4);

	/* Populate the SG list */
	int_cmd->command_header->opts =
		 __force_bit2int cpu_to_le32(opts | fis_len);
	if (buf_len) {
		command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;

		command_sg->info =
			__force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
		command_sg->dba	=
			 __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
		command_sg->dba_upper	=
			__force_bit2int cpu_to_le32((buffer >> 16) >> 16);

		int_cmd->command_header->opts |=
			__force_bit2int cpu_to_le32((1 << 16));
	}

	/* Populate the command header */
	int_cmd->command_header->byte_count = 0;

	/* Issue the command to the hardware */
	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);

	/* Poll if atomic, wait_for_completion otherwise */
	if (atomic == GFP_KERNEL) {
		/* Wait for the command to complete or timeout. */
		if (wait_for_completion_timeout(
				&wait,
				msecs_to_jiffies(timeout)) == 0) {
			dev_err(&port->dd->pdev->dev,
				"Internal command did not complete [%d] "
				"within timeout of %lu ms\n",
				atomic, timeout);
			if (mtip_check_surprise_removal(port->dd->pdev) ||
				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&port->dd->dd_flag)) {
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			rv = -EAGAIN;
		}

		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
			& (1 << MTIP_TAG_INTERNAL)) {
			dev_warn(&port->dd->pdev->dev,
				"Retiring internal command but CI is 1.\n");
			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&port->dd->dd_flag)) {
				hba_reset_nosleep(port->dd);
				rv = -ENXIO;
			} else {
				mtip_restart_port(port);
				rv = -EAGAIN;
			}
			goto exec_ic_exit;
		}

	} else {
		/* Spin for <timeout> checking if command still outstanding */
		timeout = jiffies + msecs_to_jiffies(timeout);
		while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
				& (1 << MTIP_TAG_INTERNAL))
				&& time_before(jiffies, timeout)) {
			if (mtip_check_surprise_removal(port->dd->pdev)) {
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			if ((fis->command != ATA_CMD_STANDBYNOW1) &&
				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&port->dd->dd_flag)) {
				rv = -ENXIO;
				goto exec_ic_exit;
			}
		}

		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
			& (1 << MTIP_TAG_INTERNAL)) {
			dev_err(&port->dd->pdev->dev,
				"Internal command did not complete [atomic]\n");
			rv = -EAGAIN;
			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&port->dd->dd_flag)) {
				hba_reset_nosleep(port->dd);
				rv = -ENXIO;
			} else {
				mtip_restart_port(port);
				rv = -EAGAIN;
			}
		}
	}
exec_ic_exit:
	/* Clear the allocated and active bits for the internal command. */
	atomic_set(&int_cmd->active, 0);
	release_slot(port, MTIP_TAG_INTERNAL);
	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
		/* NCQ paused */
		return rv;
	}
	clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	wake_up_interruptible(&port->svc_wait);

	return rv;
}

/*
 * Byte-swap ATA ID strings.
 *
 * ATA identify data contains strings in byte-swapped 16-bit words.
 * They must be swapped (on all architectures) to be usable as C strings.
 * This function swaps bytes in-place.
 *
 * @buf The buffer location of the string
 * @len The number of bytes to swap
 *
 * return value
 *	None
 */
static inline void ata_swap_string(u16 *buf, unsigned int len)
{
	int i;
	for (i = 0; i < (len/2); i++)
		be16_to_cpus(&buf[i]);
}

/*
 * Request the device identity information.
 *
 * If a user space buffer is not specified, i.e. is NULL, the
 * identify information is still read from the drive and placed
 * into the identify data buffer (@e port->identify) in the
 * port data structure.
 * When the identify buffer contains valid identify information @e
 * port->identify_valid is non-zero.
 *
 * @port        Pointer to the port structure.
 * @user_buffer A user space buffer where the identify data should be
 *              copied.
 *
 * return value
 *	0	Command completed successfully.
 *	-EFAULT An error occurred while coping data to the user buffer.
 *	-1	Command failed.
 */
static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
{
	int rv = 0;
	struct host_to_dev_fis fis;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return -EFAULT;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_ID_ATA;

	/* Set the identify information as invalid. */
	port->identify_valid = 0;

	/* Clear the identify information. */
	memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				port->identify_dma,
				sizeof(u16) * ATA_ID_WORDS,
				0,
				GFP_KERNEL,
				MTIP_INTERNAL_COMMAND_TIMEOUT_MS)
				< 0) {
		rv = -1;
		goto out;
	}

	/*
	 * Perform any necessary byte-swapping. Yes, the kernel does in fact
	 * perform field-sensitive swapping on the string fields.
	 * See the kernel use of ata_id_string() for proof of this.
	 */
#ifdef __LITTLE_ENDIAN
	ata_swap_string(port->identify + 27, 40);  /* model string*/
	ata_swap_string(port->identify + 23, 8);   /* firmware string*/
	ata_swap_string(port->identify + 10, 20);  /* serial# string*/
#else
	{
		int i;
		for (i = 0; i < ATA_ID_WORDS; i++)
			port->identify[i] = le16_to_cpu(port->identify[i]);
	}
#endif

	/* Set the identify buffer as valid. */
	port->identify_valid = 1;

	if (user_buffer) {
		if (copy_to_user(
			user_buffer,
			port->identify,
			ATA_ID_WORDS * sizeof(u16))) {
			rv = -EFAULT;
			goto out;
		}
	}

out:
	return rv;
}

/*
 * Issue a standby immediate command to the device.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	0	Command was executed successfully.
 *	-1	An error occurred while executing the command.
 */
static int mtip_standby_immediate(struct mtip_port *port)
{
	int rv;
	struct host_to_dev_fis	fis;
	unsigned long start;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_STANDBYNOW1;

	start = jiffies;
	rv = mtip_exec_internal_command(port, &fis, 5, 0, 0, 0,
					GFP_ATOMIC, 15000);
	dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
			jiffies_to_msecs(jiffies - start));
	if (rv)
		dev_warn(&port->dd->pdev->dev,
			"STANDBY IMMEDIATE command failed.\n");

	return rv;
}

/*
 * Issue a READ LOG EXT command to the device.
 *
 * @port	pointer to the port structure.
* @page page number to fetch * @buffer pointer to buffer * @buffer_dma dma address corresponding to @buffer * @sectors page length to fetch, in sectors * * return value * @rv return value from mtip_exec_internal_command() */ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, dma_addr_t buffer_dma, unsigned int sectors) { struct host_to_dev_fis fis; memset(&fis, 0, sizeof(struct host_to_dev_fis)); fis.type = 0x27; fis.opts = 1 << 7; fis.command = ATA_CMD_READ_LOG_EXT; fis.sect_count = sectors & 0xFF; fis.sect_cnt_ex = (sectors >> 8) & 0xFF; fis.lba_low = page; fis.lba_mid = 0; fis.device = ATA_DEVICE_OBS; memset(buffer, 0, sectors * ATA_SECT_SIZE); return mtip_exec_internal_command(port, &fis, 5, buffer_dma, sectors * ATA_SECT_SIZE, 0, GFP_ATOMIC, MTIP_INTERNAL_COMMAND_TIMEOUT_MS); } /* * Issue a SMART READ DATA command to the device. * * @port pointer to the port structure. * @buffer pointer to buffer * @buffer_dma dma address corresponding to @buffer * * return value * @rv return value from mtip_exec_internal_command() */ static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer, dma_addr_t buffer_dma) { struct host_to_dev_fis fis; memset(&fis, 0, sizeof(struct host_to_dev_fis)); fis.type = 0x27; fis.opts = 1 << 7; fis.command = ATA_CMD_SMART; fis.features = 0xD0; fis.sect_count = 1; fis.lba_mid = 0x4F; fis.lba_hi = 0xC2; fis.device = ATA_DEVICE_OBS; return mtip_exec_internal_command(port, &fis, 5, buffer_dma, ATA_SECT_SIZE, 0, GFP_ATOMIC, 15000); } /* * Get the value of a smart attribute * * @port pointer to the port structure * @id attribute number * @attrib pointer to return attrib information corresponding to @id * * return value * -EINVAL NULL buffer passed or unsupported attribute @id. 
* -EPERM Identify data not valid, SMART not supported or not enabled */ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, struct smart_attr *attrib) { int rv, i; struct smart_attr *pattr; if (!attrib) return -EINVAL; if (!port->identify_valid) { dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n"); return -EPERM; } if (!(port->identify[82] & 0x1)) { dev_warn(&port->dd->pdev->dev, "SMART not supported\n"); return -EPERM; } if (!(port->identify[85] & 0x1)) { dev_warn(&port->dd->pdev->dev, "SMART not enabled\n"); return -EPERM; } memset(port->smart_buf, 0, ATA_SECT_SIZE); rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma); if (rv) { dev_warn(&port->dd->pdev->dev, "Failed to ge SMART data\n"); return rv; } pattr = (struct smart_attr *)(port->smart_buf + 2); for (i = 0; i < 29; i++, pattr++) if (pattr->attr_id == id) { memcpy(attrib, pattr, sizeof(struct smart_attr)); break; } if (i == 29) { dev_warn(&port->dd->pdev->dev, "Query for invalid SMART attribute ID\n"); rv = -EINVAL; } return rv; } /* * Get the drive capacity. * * @dd Pointer to the device data structure. * @sectors Pointer to the variable that will receive the sector count. * * return value * 1 Capacity was returned successfully. * 0 The identify information is invalid. */ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) { struct mtip_port *port = dd->port; u64 total, raw0, raw1, raw2, raw3; raw0 = port->identify[100]; raw1 = port->identify[101]; raw2 = port->identify[102]; raw3 = port->identify[103]; total = raw0 | raw1<<16 | raw2<<32 | raw3<<48; *sectors = total; return (bool) !!port->identify_valid; } /* * Reset the HBA. * * Resets the HBA by setting the HBA Reset bit in the Global * HBA Control register. After setting the HBA Reset bit the * function waits for 1 second before reading the HBA Reset * bit to make sure it has cleared. If HBA Reset is not clear * an error is returned. Cannot be used in non-blockable * context. 
* * @dd Pointer to the driver data structure. * * return value * 0 The reset was successful. * -1 The HBA Reset bit did not clear. */ static int mtip_hba_reset(struct driver_data *dd) { mtip_deinit_port(dd->port); /* Set the reset bit */ writel(HOST_RESET, dd->mmio + HOST_CTL); /* Flush */ readl(dd->mmio + HOST_CTL); /* Wait for reset to clear */ ssleep(1); /* Check the bit has cleared */ if (readl(dd->mmio + HOST_CTL) & HOST_RESET) { dev_err(&dd->pdev->dev, "Reset bit did not clear.\n"); return -1; } return 0; } /* * Display the identify command data. * * @port Pointer to the port data structure. * * return value * None */ static void mtip_dump_identify(struct mtip_port *port) { sector_t sectors; unsigned short revid; char cbuf[42]; if (!port->identify_valid) return; strlcpy(cbuf, (char *)(port->identify+10), 21); dev_info(&port->dd->pdev->dev, "Serial No.: %s\n", cbuf); strlcpy(cbuf, (char *)(port->identify+23), 9); dev_info(&port->dd->pdev->dev, "Firmware Ver.: %s\n", cbuf); strlcpy(cbuf, (char *)(port->identify+27), 41); dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf); if (mtip_hw_get_capacity(port->dd, &sectors)) dev_info(&port->dd->pdev->dev, "Capacity: %llu sectors (%llu MB)\n", (u64)sectors, ((u64)sectors) * ATA_SECT_SIZE >> 20); pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid); switch (revid & 0xFF) { case 0x1: strlcpy(cbuf, "A0", 3); break; case 0x3: strlcpy(cbuf, "A2", 3); break; default: strlcpy(cbuf, "?", 2); break; } dev_info(&port->dd->pdev->dev, "Card Type: %s\n", cbuf); } /* * Map the commands scatter list into the command table. * * @command Pointer to the command. * @nents Number of scatter list entries. 
* * return value * None */ static inline void fill_command_sg(struct driver_data *dd, struct mtip_cmd *command, int nents) { int n; unsigned int dma_len; struct mtip_cmd_sg *command_sg; struct scatterlist *sg = command->sg; command_sg = command->command + AHCI_CMD_TBL_HDR_SZ; for (n = 0; n < nents; n++) { dma_len = sg_dma_len(sg); if (dma_len > 0x400000) dev_err(&dd->pdev->dev, "DMA segment length truncated\n"); command_sg->info = __force_bit2int cpu_to_le32((dma_len-1) & 0x3FFFFF); command_sg->dba = __force_bit2int cpu_to_le32(sg_dma_address(sg)); command_sg->dba_upper = __force_bit2int cpu_to_le32((sg_dma_address(sg) >> 16) >> 16); command_sg++; sg++; } } /* * @brief Execute a drive command. * * return value 0 The command completed successfully. * return value -1 An error occurred while executing the command. */ static int exec_drive_task(struct mtip_port *port, u8 *command) { struct host_to_dev_fis fis; struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG); /* Build the FIS. */ memset(&fis, 0, sizeof(struct host_to_dev_fis)); fis.type = 0x27; fis.opts = 1 << 7; fis.command = command[0]; fis.features = command[1]; fis.sect_count = command[2]; fis.sector = command[3]; fis.cyl_low = command[4]; fis.cyl_hi = command[5]; fis.device = command[6] & ~0x10; /* Clear the dev bit*/ dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n", __func__, command[0], command[1], command[2], command[3], command[4], command[5], command[6]); /* Execute the command. */ if (mtip_exec_internal_command(port, &fis, 5, 0, 0, 0, GFP_KERNEL, MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) { return -1; } command[0] = reply->command; /* Status*/ command[1] = reply->features; /* Error*/ command[4] = reply->cyl_low; command[5] = reply->cyl_hi; dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n", __func__, command[0], command[1], command[4], command[5]); return 0; } /* * @brief Execute a drive command. 
* * @param port Pointer to the port data structure. * @param command Pointer to the user specified command parameters. * @param user_buffer Pointer to the user space buffer where read sector * data should be copied. * * return value 0 The command completed successfully. * return value -EFAULT An error occurred while copying the completion * data to the user space buffer. * return value -1 An error occurred while executing the command. */ static int exec_drive_command(struct mtip_port *port, u8 *command, void __user *user_buffer) { struct host_to_dev_fis fis; struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG); /* Build the FIS. */ memset(&fis, 0, sizeof(struct host_to_dev_fis)); fis.type = 0x27; fis.opts = 1 << 7; fis.command = command[0]; fis.features = command[2]; fis.sect_count = command[3]; if (fis.command == ATA_CMD_SMART) { fis.sector = command[1]; fis.cyl_low = 0x4F; fis.cyl_hi = 0xC2; } dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, sect %x, " "feat %x, sectcnt %x\n", __func__, command[0], command[1], command[2], command[3]); memset(port->sector_buffer, 0x00, ATA_SECT_SIZE); /* Execute the command. */ if (mtip_exec_internal_command(port, &fis, 5, port->sector_buffer_dma, (command[3] != 0) ? ATA_SECT_SIZE : 0, 0, GFP_KERNEL, MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) { return -1; } /* Collect the completion status. */ command[0] = reply->command; /* Status*/ command[1] = reply->features; /* Error*/ command[2] = command[3]; dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, " "err %x, cmd %x\n", __func__, command[0], command[1], command[2]); if (user_buffer && command[3]) { if (copy_to_user(user_buffer, port->sector_buffer, ATA_SECT_SIZE * command[3])) { return -EFAULT; } } return 0; } /* * Indicates whether a command has a single sector payload. * * @command passed to the device to perform the certain event. * @features passed to the device to perform the certain event. 
 *
 * return value
 *	1	command is one that always has a single sector payload,
 *		regardless of the value in the Sector Count field.
 *	0	otherwise
 *
 */
static unsigned int implicit_sector(unsigned char command,
				    unsigned char features)
{
	unsigned int rv = 0;

	/* list of commands that have an implicit sector count of 1 */
	switch (command) {
	case ATA_CMD_SEC_SET_PASS:
	case ATA_CMD_SEC_UNLOCK:
	case ATA_CMD_SEC_ERASE_PREP:
	case ATA_CMD_SEC_ERASE_UNIT:
	case ATA_CMD_SEC_FREEZE_LOCK:
	case ATA_CMD_SEC_DISABLE_PASS:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_PMP_WRITE:
		rv = 1;
		break;
	case ATA_CMD_SET_MAX:
		if (features == ATA_SET_MAX_UNLOCK)
			rv = 1;
		break;
	case ATA_CMD_SMART:
		if ((features == ATA_SMART_READ_VALUES) ||
				(features == ATA_SMART_READ_THRESHOLDS))
			rv = 1;
		break;
	case ATA_CMD_CONF_OVERLAY:
		if ((features == ATA_DCO_IDENTIFY) ||
				(features == ATA_DCO_SET))
			rv = 1;
		break;
	}
	return rv;
}

/*
 * Executes a taskfile
 * See ide_taskfile_ioctl() for derivation
 *
 * Copies in the user taskfile and optional data buffers, builds and
 * issues the corresponding FIS, then copies the resulting taskfile
 * registers (and any read data) back to user space.
 */
static int exec_drive_taskfile(struct driver_data *dd,
			       void __user *buf,
			       ide_task_request_t *req_task,
			       int outtotal)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply;
	u8 *outbuf = NULL;
	u8 *inbuf = NULL;
	dma_addr_t outbuf_dma = 0;
	dma_addr_t inbuf_dma = 0;
	dma_addr_t dma_buffer = 0;
	int err = 0;
	unsigned int taskin = 0;
	unsigned int taskout = 0;
	u8 nsect = 0;
	unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
	unsigned int force_single_sector;
	unsigned int transfer_size;
	unsigned long task_file_data;
	/* input data follows the output data in the user buffer */
	int intotal = outtotal + req_task->out_size;

	taskout = req_task->out_size;
	taskin = req_task->in_size;
	/* 130560 = 512 * 0xFF*/
	if (taskin > 130560 || taskout > 130560) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
		/*
		 * NOTE(review): mapping failure should ideally be checked
		 * with pci_dma_mapping_error() rather than == 0 — confirm
		 * against the DMA API for this kernel version.
		 */
		outbuf_dma = pci_map_single(dd->pdev,
					 outbuf,
					 taskout,
					 DMA_TO_DEVICE);
		if (outbuf_dma == 0) {
			err = -ENOMEM;
			goto abort;
		}
		dma_buffer = outbuf_dma;
	}

	if (taskin) {
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}

		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
		inbuf_dma = pci_map_single(dd->pdev,
					 inbuf,
					 taskin, DMA_FROM_DEVICE);
		if (inbuf_dma == 0) {
			err = -ENOMEM;
			goto abort;
		}
		dma_buffer = inbuf_dma;
	}

	/* only supports PIO and non-data commands from this ioctl. */
	switch (req_task->data_phase) {
	case TASKFILE_OUT:
		nsect = taskout / ATA_SECT_SIZE;
		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
		break;
	case TASKFILE_IN:
		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
		break;
	case TASKFILE_NO_DATA:
		reply = (dd->port->rxfis + RX_FIS_D2H_REG);
		break;
	default:
		err = -EINVAL;
		goto abort;
	}

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));

	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= req_task->io_ports[7];
	fis.features	= req_task->io_ports[1];
	fis.sect_count	= req_task->io_ports[2];
	fis.lba_low	= req_task->io_ports[3];
	fis.lba_mid	= req_task->io_ports[4];
	fis.lba_hi	= req_task->io_ports[5];
	 /* Clear the dev bit*/
	fis.device	= req_task->io_ports[6] & ~0x10;

	/* 48-bit (HOB) registers are only filled when requested. */
	if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
		req_task->in_flags.all	=
			IDE_TASKFILE_STD_IN_FLAGS |
			(IDE_HOB_STD_IN_FLAGS << 8);
		fis.lba_low_ex		= req_task->hob_ports[3];
		fis.lba_mid_ex		= req_task->hob_ports[4];
		fis.lba_hi_ex		= req_task->hob_ports[5];
		fis.features_ex		= req_task->hob_ports[1];
		fis.sect_cnt_ex		= req_task->hob_ports[2];

	} else {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
	}

	force_single_sector = implicit_sector(fis.command, fis.features);

	if ((taskin || taskout) && (!fis.sect_count)) {
		if (nsect)
			fis.sect_count = nsect;
		else {
			if (!force_single_sector) {
				dev_warn(&dd->pdev->dev,
					"data movement but "
					"sect_count is 0\n");
					err = -EINVAL;
					goto abort;
			}
		}
	}

	dbg_printk(MTIP_DRV_NAME
		" %s: cmd %x, feat %x, nsect %x,"
		" sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
		" head/dev %x\n",
		__func__,
		fis.command,
		fis.features,
		fis.sect_count,
		fis.lba_low,
		fis.lba_mid,
		fis.lba_hi,
		fis.device);

	/* Per-command timeouts for the known slow operations. */
	switch (fis.command) {
	case ATA_CMD_DOWNLOAD_MICRO:
		/* Change timeout for Download Microcode to 2 minutes */
		timeout = 120000;
		break;
	case ATA_CMD_SEC_ERASE_UNIT:
		/* Change timeout for Security Erase Unit to 4 minutes.*/
		timeout = 240000;
		break;
	case ATA_CMD_STANDBYNOW1:
		/* Change timeout for standby immediate to 10 seconds.*/
		timeout = 10000;
		break;
	case 0xF7:
	case 0xFA:
		/* Change timeout for vendor unique command to 10 secs */
		timeout = 10000;
		break;
	case ATA_CMD_SMART:
		/* Change timeout for vendor unique command to 15 secs */
		timeout = 15000;
		break;
	default:
		timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
		break;
	}

	/* Determine the correct transfer size.*/
	if (force_single_sector)
		transfer_size = ATA_SECT_SIZE;
	else
		transfer_size = ATA_SECT_SIZE * fis.sect_count;

	/* Execute the command.*/
	if (mtip_exec_internal_command(dd->port,
				 &fis,
				 5,
				 dma_buffer,
				 transfer_size,
				 0,
				 GFP_KERNEL,
				 timeout) < 0) {
		err = -EIO;
		goto abort;
	}

	task_file_data = readl(dd->port->mmio+PORT_TFDATA);

	/*
	 * Pick which received FIS carries the completion registers:
	 * a PIO-in transfer that is not busy reports via the PIO Setup
	 * FIS, everything else via the D2H register FIS.
	 */
	if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
		reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
		req_task->io_ports[7] = reply->control;
	} else {
		reply = dd->port->rxfis + RX_FIS_D2H_REG;
		req_task->io_ports[7] = reply->command;
	}

	/* reclaim the DMA buffers.*/
	if (inbuf_dma)
		pci_unmap_single(dd->pdev, inbuf_dma,
			taskin, DMA_FROM_DEVICE);
	if (outbuf_dma)
		pci_unmap_single(dd->pdev, outbuf_dma,
			taskout, DMA_TO_DEVICE);
	inbuf_dma  = 0;
	outbuf_dma = 0;

	/* return the ATA registers to the caller.*/
	req_task->io_ports[1] = reply->features;
	req_task->io_ports[2] = reply->sect_count;
	req_task->io_ports[3] = reply->lba_low;
	req_task->io_ports[4] = reply->lba_mid;
	req_task->io_ports[5] = reply->lba_hi;
	req_task->io_ports[6] = reply->device;

	if (req_task->out_flags.all & 1)  {

		req_task->hob_ports[3] = reply->lba_low_ex;
		req_task->hob_ports[4] = reply->lba_mid_ex;
		req_task->hob_ports[5] = reply->lba_hi_ex;
		req_task->hob_ports[1] = reply->features_ex;
		req_task->hob_ports[2] = reply->sect_cnt_ex;
	}
	dbg_printk(MTIP_DRV_NAME
		" %s: Completion: stat %x,"
		"err %x, sect_cnt %x, lbalo %x,"
		"lbamid %x, lbahi %x, dev %x\n",
		__func__,
		req_task->io_ports[7],
		req_task->io_ports[1],
		req_task->io_ports[2],
		req_task->io_ports[3],
		req_task->io_ports[4],
		req_task->io_ports[5],
		req_task->io_ports[6]);

	if (taskout) {
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	/* Safe on all paths: the dma handles are zeroed after unmap above. */
	if (inbuf_dma)
		pci_unmap_single(dd->pdev, inbuf_dma,
					taskin, DMA_FROM_DEVICE);
	if (outbuf_dma)
		pci_unmap_single(dd->pdev, outbuf_dma,
					taskout, DMA_TO_DEVICE);
	kfree(outbuf);
	kfree(inbuf);

	return err;
}

/*
 * Handle IOCTL calls from the Block Layer.
 *
 * This function is called by the Block Layer when it receives an IOCTL
 * command that it does not understand. If the IOCTL command is not supported
 * this function returns -ENOTTY.
 *
 * @dd  Pointer to the driver data structure.
 * @cmd IOCTL command passed from the Block Layer.
 * @arg IOCTL argument passed from the Block Layer.
 *
 * return value
 *	0	The IOCTL completed successfully.
 *	-ENOTTY The specified command is not supported.
 *	-EFAULT An error occurred copying data to a user space buffer.
 *	-EIO	An error occurred while executing the command.
 */
static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
			 unsigned long arg)
{
	switch (cmd) {
	case HDIO_GET_IDENTITY:
		if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
			dev_warn(&dd->pdev->dev,
				"Unable to read identity\n");
			return -EIO;
		}

		break;
	case HDIO_DRIVE_CMD:
	{
		u8 drive_command[4];

		/* Copy the user command info to our buffer. */
		if (copy_from_user(drive_command,
					 (void __user *) arg,
					 sizeof(drive_command)))
			return -EFAULT;

		/* Execute the drive command. */
		if (exec_drive_command(dd->port,
					 drive_command,
					 (void __user *) (arg+4)))
			return -EIO;

		/* Copy the status back to the users buffer. */
		if (copy_to_user((void __user *) arg,
					 drive_command,
					 sizeof(drive_command)))
			return -EFAULT;

		break;
	}
	case HDIO_DRIVE_TASK:
	{
		u8 drive_command[7];

		/* Copy the user command info to our buffer. */
		if (copy_from_user(drive_command,
					 (void __user *) arg,
					 sizeof(drive_command)))
			return -EFAULT;

		/* Execute the drive command. */
		if (exec_drive_task(dd->port, drive_command))
			return -EIO;

		/* Copy the status back to the users buffer. */
		if (copy_to_user((void __user *) arg,
					 drive_command,
					 sizeof(drive_command)))
			return -EFAULT;

		break;
	}
	case HDIO_DRIVE_TASKFILE: {
		ide_task_request_t req_task;
		int ret, outtotal;

		if (copy_from_user(&req_task, (void __user *) arg,
					sizeof(req_task)))
			return -EFAULT;

		outtotal = sizeof(req_task);

		ret = exec_drive_taskfile(dd, (void __user *) arg,
						&req_task, outtotal);

		if (copy_to_user((void __user *) arg, &req_task,
							sizeof(req_task)))
			return -EFAULT;

		return ret;
	}

	default:
		/*
		 * NOTE(review): the header comment documents -ENOTTY for
		 * unsupported commands but -EINVAL is returned — confirm
		 * which callers depend on before changing.
		 */
		return -EINVAL;
	}
	return 0;
}

/*
 * Submit an IO to the hw
 *
 * This function is called by the block layer to issue an io
 * to the device. Upon completion, the callback function will
 * be called with the data parameter passed as the callback data.
 *
 * @dd       Pointer to the driver data structure.
 * @start    First sector to read.
 * @nsect    Number of sectors to read.
 * @nents    Number of entries in scatter list for the read command.
 * @tag      The tag of this read command.
 * @callback Pointer to the function that should be called
 *	     when the read completes.
 * @data     Callback data passed to the callback function
 *	     when the read completes.
 * @dir      Direction (read or write)
 *
 * return value
 *	None
 */
static void mtip_hw_submit_io(struct driver_data *dd,
			      sector_t start,
			      int nsect,
			      int nents,
			      int tag,
			      void *callback,
			      void *data,
			      int dir)
{
	struct host_to_dev_fis	*fis;
	struct mtip_port *port = dd->port;
	struct mtip_cmd *command = &port->commands[tag];
	int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* Map the scatter list for DMA access */
	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);

	command->scatter_ents = nents;

	/*
	 * The number of retries for this command before it is
	 * reported as a failure to the upper layers.
	 */
	command->retries = MTIP_MAX_RETRIES;

	/* Fill out fis */
	fis = command->command;
	fis->type        = 0x27;
	fis->opts        = 1 << 7;
	fis->command     =
		(dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
	/*
	 * NOTE(review): the 48-bit LBA is stored via unsigned-int
	 * aliasing of the byte-wide FIS fields — assumes little-endian
	 * layout and tolerant alignment; confirm on new platforms.
	 */
	*((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
	*((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
	fis->device	 = 1 << 6;
	fis->features    = nsect & 0xFF;
	fis->features_ex = (nsect >> 8) & 0xFF;
	fis->sect_count  = ((tag << 3) | (tag >> 5));
	fis->sect_cnt_ex = 0;
	fis->control     = 0;
	fis->res2        = 0;
	fis->res3        = 0;
	fill_command_sg(dd, command, nents);

	/* Populate the command header */
	command->command_header->opts =
			__force_bit2int cpu_to_le32(
				(nents << 16) | 5 | AHCI_CMD_PREFETCH);
	command->command_header->byte_count = 0;

	/*
	 * Set the completion function and data for the command
	 * within this layer.
	 */
	command->comp_data = dd;
	command->comp_func = mtip_async_complete;
	command->direction = dma_dir;

	/*
	 * Set the completion function and data for the command passed
	 * from the upper layer.
	 */
	command->async_data = data;
	command->async_callback = callback;

	/*
	 * To prevent this command from being issued
	 * if an internal command is in progress or error handling is active.
	 */
	if (port->flags & MTIP_PF_PAUSE_IO) {
		set_bit(tag, port->cmds_to_issue);
		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
		return;
	}

	/* Issue the command to the hardware */
	mtip_issue_ncq_command(port, tag);

	return;
}

/*
 * Release a command slot.
 *
 * @dd  Pointer to the driver data structure.
 * @tag Slot tag
 *
 * return value
 *	None
 */
static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
{
	release_slot(dd->port, tag);
}

/*
 * Obtain a command slot and return its associated scatter list.
* * @dd Pointer to the driver data structure. * @tag Pointer to an int that will receive the allocated command * slot tag. * * return value * Pointer to the scatter list for the allocated command slot * or NULL if no command slots are available. */ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd, int *tag) { /* * It is possible that, even with this semaphore, a thread * may think that no command slots are available. Therefore, we * need to make an attempt to get_slot(). */ down(&dd->port->cmd_slot); *tag = get_slot(dd->port); if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { up(&dd->port->cmd_slot); return NULL; } if (unlikely(*tag < 0)) { up(&dd->port->cmd_slot); return NULL; } return dd->port->commands[*tag].sg; } /* * Sysfs register/status dump. * * @dev Pointer to the device structure, passed by the kernrel. * @attr Pointer to the device_attribute structure passed by the kernel. * @buf Pointer to the char buffer that will receive the stats info. * * return value * The size, in bytes, of the data copied into buf. 
*/ static ssize_t mtip_hw_show_registers(struct device *dev, struct device_attribute *attr, char *buf) { u32 group_allocated; struct driver_data *dd = dev_to_disk(dev)->private_data; int size = 0; int n; size += sprintf(&buf[size], "S ACTive:\n"); for (n = 0; n < dd->slot_groups; n++) size += sprintf(&buf[size], "0x%08x\n", readl(dd->port->s_active[n])); size += sprintf(&buf[size], "Command Issue:\n"); for (n = 0; n < dd->slot_groups; n++) size += sprintf(&buf[size], "0x%08x\n", readl(dd->port->cmd_issue[n])); size += sprintf(&buf[size], "Allocated:\n"); for (n = 0; n < dd->slot_groups; n++) { if (sizeof(long) > sizeof(u32)) group_allocated = dd->port->allocated[n/2] >> (32*(n&1)); else group_allocated = dd->port->allocated[n]; size += sprintf(&buf[size], "0x%08x\n", group_allocated); } size += sprintf(&buf[size], "Completed:\n"); for (n = 0; n < dd->slot_groups; n++) size += sprintf(&buf[size], "0x%08x\n", readl(dd->port->completed[n])); size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n", readl(dd->port->mmio + PORT_IRQ_STAT)); size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n", readl(dd->mmio + HOST_IRQ_STAT)); return size; } static ssize_t mtip_hw_show_status(struct device *dev, struct device_attribute *attr, char *buf) { struct driver_data *dd = dev_to_disk(dev)->private_data; int size = 0; if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag)) size += sprintf(buf, "%s", "thermal_shutdown\n"); else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag)) size += sprintf(buf, "%s", "write_protect\n"); else size += sprintf(buf, "%s", "online\n"); return size; } static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL); static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); /* * Create the sysfs related attributes. * * @dd Pointer to the driver data structure. * @kobj Pointer to the kobj for the block device. * * return value * 0 Operation completed successfully. * -EINVAL Invalid parameter. 
 */
static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
{
	if (!kobj || !dd)
		return -EINVAL;

	/* Creation failures are non-fatal; just warn. */
	if (sysfs_create_file(kobj, &dev_attr_registers.attr))
		dev_warn(&dd->pdev->dev,
			"Error creating 'registers' sysfs entry\n");
	if (sysfs_create_file(kobj, &dev_attr_status.attr))
		dev_warn(&dd->pdev->dev,
			"Error creating 'status' sysfs entry\n");
	return 0;
}

/*
 * Remove the sysfs related attributes.
 *
 * @dd   Pointer to the driver data structure.
 * @kobj Pointer to the kobj for the block device.
 *
 * return value
 *	0	Operation completed successfully.
 *	-EINVAL Invalid parameter.
 */
static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
{
	if (!kobj || !dd)
		return -EINVAL;

	sysfs_remove_file(kobj, &dev_attr_registers.attr);
	sysfs_remove_file(kobj, &dev_attr_status.attr);

	return 0;
}

/*
 * Perform any init/resume time hardware setup
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	None
 */
static inline void hba_setup(struct driver_data *dd)
{
	u32 hwdata;
	hwdata = readl(dd->mmio + HOST_HSORG);

	/* interrupt bug workaround: use only 1 IS bit.*/
	writel(hwdata |
		HSORG_DISABLE_SLOTGRP_INTR |
		HSORG_DISABLE_SLOTGRP_PXIS,
		dd->mmio + HOST_HSORG);
}

/*
 * Detect the details of the product, and store anything needed
 * into the driver data structure.  This includes product type and
 * version and number of slot groups.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	None
 */
static void mtip_detect_product(struct driver_data *dd)
{
	u32 hwdata;
	unsigned int rev, slotgroups;

	/*
	 * HBA base + 0xFC [15:0] - vendor-specific hardware interface
	 * info register:
	 * [15:8] hardware/software interface rev#
	 * [   3] asic-style interface
	 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
	 */
	hwdata = readl(dd->mmio + HOST_HSORG);

	dd->product_type = MTIP_PRODUCT_UNKNOWN;
	dd->slot_groups = 1;

	if (hwdata & 0x8) {
		dd->product_type = MTIP_PRODUCT_ASICFPGA;
		rev = (hwdata & HSORG_HWREV) >> 8;
		slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
		dev_info(&dd->pdev->dev,
			"ASIC-FPGA design, HS rev 0x%x, "
			"%i slot groups [%i slots]\n",
			 rev,
			 slotgroups,
			 slotgroups * 32);

		if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
			dev_warn(&dd->pdev->dev,
				"Warning: driver only supports "
				"%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
			slotgroups = MTIP_MAX_SLOT_GROUPS;
		}
		dd->slot_groups = slotgroups;
		return;
	}

	dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
}

/*
 * Blocking wait for FTL rebuild to complete
 *
 * @dd Pointer to the DRIVER_DATA structure.
 *
 * return value
 *	0	FTL rebuild completed successfully
 *	-EFAULT FTL rebuild error/timeout/interruption
 */
static int mtip_ftl_rebuild_poll(struct driver_data *dd)
{
	unsigned long timeout, cnt = 0, start;

	dev_warn(&dd->pdev->dev,
		"FTL rebuild in progress. Polling for completion.\n");

	start = jiffies;
	timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);

	do {
		/* Abort the wait on removal or surprise-removal. */
		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
				&dd->dd_flag)))
			return -EFAULT;
		if (mtip_check_surprise_removal(dd->pdev))
			return -EFAULT;

		if (mtip_get_identify(dd->port, NULL) < 0)
			return -EFAULT;

		/* The identify data flags an in-progress rebuild. */
		if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
			MTIP_FTL_REBUILD_MAGIC) {
			ssleep(1);
			/* Print message every 3 minutes */
			if (cnt++ >= 180) {
				dev_warn(&dd->pdev->dev,
				"FTL rebuild in progress (%d secs).\n",
				jiffies_to_msecs(jiffies - start) / 1000);
				cnt = 0;
			}
		} else {
			dev_warn(&dd->pdev->dev,
				"FTL rebuild complete (%d secs).\n",
			jiffies_to_msecs(jiffies - start) / 1000);
			/* Rebuild done: bring the block device up. */
			mtip_block_initialize(dd);
			return 0;
		}
		ssleep(10);
	} while (time_before(jiffies, timeout));

	/* Check for timeout */
	dev_err(&dd->pdev->dev,
		"Timed out waiting for FTL rebuild to complete (%d secs).\n",
		jiffies_to_msecs(jiffies - start) / 1000);
	return -EFAULT;
}

/*
 * service thread to issue queued
commands * * @data Pointer to the driver data structure. * * return value * 0 */ static int mtip_service_thread(void *data) { struct driver_data *dd = (struct driver_data *)data; unsigned long slot, slot_start, slot_wrap; unsigned int num_cmd_slots = dd->slot_groups * 32; struct mtip_port *port = dd->port; while (1) { /* * the condition is to check neither an internal command is * is in progress nor error handling is active */ wait_event_interruptible(port->svc_wait, (port->flags) && !(port->flags & MTIP_PF_PAUSE_IO)); if (kthread_should_stop()) break; if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) break; set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) { slot = 1; /* used to restrict the loop to one iteration */ slot_start = num_cmd_slots; slot_wrap = 0; while (1) { slot = find_next_bit(port->cmds_to_issue, num_cmd_slots, slot); if (slot_wrap == 1) { if ((slot_start >= slot) || (slot >= num_cmd_slots)) break; } if (unlikely(slot_start == num_cmd_slots)) slot_start = slot; if (unlikely(slot == num_cmd_slots)) { slot = 1; slot_wrap = 1; continue; } /* Issue the command to the hardware */ mtip_issue_ncq_command(port, slot); clear_bit(slot, port->cmds_to_issue); } clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) { if (!mtip_ftl_rebuild_poll(dd)) set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag); clear_bit(MTIP_PF_REBUILD_BIT, &port->flags); } clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) break; } return 0; } /* * Called once for each card. * * @dd Pointer to the driver data structure. * * return value * 0 on success, else an error code. 
*/ static int mtip_hw_init(struct driver_data *dd) { int i; int rv; unsigned int num_command_slots; unsigned long timeout, timetaken; unsigned char *buf; struct smart_attr attr242; dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR]; mtip_detect_product(dd); if (dd->product_type == MTIP_PRODUCT_UNKNOWN) { rv = -EIO; goto out1; } num_command_slots = dd->slot_groups * 32; hba_setup(dd); tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd); dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL); if (!dd->port) { dev_err(&dd->pdev->dev, "Memory allocation: port structure\n"); return -ENOMEM; } /* Counting semaphore to track command slot usage */ sema_init(&dd->port->cmd_slot, num_command_slots - 1); /* Spinlock to prevent concurrent issue */ spin_lock_init(&dd->port->cmd_issue_lock); /* Set the port mmio base address. */ dd->port->mmio = dd->mmio + PORT_OFFSET; dd->port->dd = dd; /* Allocate memory for the command list. */ dd->port->command_list = dmam_alloc_coherent(&dd->pdev->dev, HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4), &dd->port->command_list_dma, GFP_KERNEL); if (!dd->port->command_list) { dev_err(&dd->pdev->dev, "Memory allocation: command list\n"); rv = -ENOMEM; goto out1; } /* Clear the memory we have allocated. */ memset(dd->port->command_list, 0, HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4)); /* Setup the addresse of the RX FIS. */ dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ; dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ; /* Setup the address of the command tables. */ dd->port->command_table = dd->port->rxfis + AHCI_RX_FIS_SZ; dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ; /* Setup the address of the identify data. 
*/ dd->port->identify = dd->port->command_table + HW_CMD_TBL_AR_SZ; dd->port->identify_dma = dd->port->command_tbl_dma + HW_CMD_TBL_AR_SZ; /* Setup the address of the sector buffer - for some non-ncq cmds */ dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE; dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE; /* Setup the address of the log buf - for read log command */ dd->port->log_buf = (void *)dd->port->sector_buffer + ATA_SECT_SIZE; dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE; /* Setup the address of the smart buf - for smart read data command */ dd->port->smart_buf = (void *)dd->port->log_buf + ATA_SECT_SIZE; dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE; /* Point the command headers at the command tables. */ for (i = 0; i < num_command_slots; i++) { dd->port->commands[i].command_header = dd->port->command_list + (sizeof(struct mtip_cmd_hdr) * i); dd->port->commands[i].command_header_dma = dd->port->command_list_dma + (sizeof(struct mtip_cmd_hdr) * i); dd->port->commands[i].command = dd->port->command_table + (HW_CMD_TBL_SZ * i); dd->port->commands[i].command_dma = dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i); if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64) dd->port->commands[i].command_header->ctbau = __force_bit2int cpu_to_le32( (dd->port->commands[i].command_dma >> 16) >> 16); dd->port->commands[i].command_header->ctba = __force_bit2int cpu_to_le32( dd->port->commands[i].command_dma & 0xFFFFFFFF); /* * If this is not done, a bug is reported by the stock * FC11 i386. Due to the fact that it has lots of kernel * debugging enabled. */ sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG); /* Mark all commands as currently inactive.*/ atomic_set(&dd->port->commands[i].active, 0); } /* Setup the pointers to the extended s_active and CI registers. 
*/ for (i = 0; i < dd->slot_groups; i++) { dd->port->s_active[i] = dd->port->mmio + i*0x80 + PORT_SCR_ACT; dd->port->cmd_issue[i] = dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE; dd->port->completed[i] = dd->port->mmio + i*0x80 + PORT_SDBV; } timetaken = jiffies; timeout = jiffies + msecs_to_jiffies(30000); while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) && time_before(jiffies, timeout)) { mdelay(100); } if (unlikely(mtip_check_surprise_removal(dd->pdev))) { timetaken = jiffies - timetaken; dev_warn(&dd->pdev->dev, "Surprise removal detected at %u ms\n", jiffies_to_msecs(timetaken)); rv = -ENODEV; goto out2 ; } if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { timetaken = jiffies - timetaken; dev_warn(&dd->pdev->dev, "Removal detected at %u ms\n", jiffies_to_msecs(timetaken)); rv = -EFAULT; goto out2; } /* Conditionally reset the HBA. */ if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) { if (mtip_hba_reset(dd) < 0) { dev_err(&dd->pdev->dev, "Card did not reset within timeout\n"); rv = -EIO; goto out2; } } else { /* Clear any pending interrupts on the HBA */ writel(readl(dd->mmio + HOST_IRQ_STAT), dd->mmio + HOST_IRQ_STAT); } mtip_init_port(dd->port); mtip_start_port(dd->port); /* Setup the ISR and enable interrupts. */ rv = devm_request_irq(&dd->pdev->dev, dd->pdev->irq, mtip_irq_handler, IRQF_SHARED, dev_driver_string(&dd->pdev->dev), dd); if (rv) { dev_err(&dd->pdev->dev, "Unable to allocate IRQ %d\n", dd->pdev->irq); goto out2; } /* Enable interrupts on the HBA. 
*/ writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, dd->mmio + HOST_CTL); init_timer(&dd->port->cmd_timer); init_waitqueue_head(&dd->port->svc_wait); dd->port->cmd_timer.data = (unsigned long int) dd->port; dd->port->cmd_timer.function = mtip_timeout_function; mod_timer(&dd->port->cmd_timer, jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { rv = -EFAULT; goto out3; } if (mtip_get_identify(dd->port, NULL) < 0) { rv = -EFAULT; goto out3; } if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == MTIP_FTL_REBUILD_MAGIC) { set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); return MTIP_FTL_REBUILD_MAGIC; } mtip_dump_identify(dd->port); /* check write protect, over temp and rebuild statuses */ rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, dd->port->log_buf, dd->port->log_buf_dma, 1); if (rv) { dev_warn(&dd->pdev->dev, "Error in READ LOG EXT (10h) command\n"); /* non-critical error, don't fail the load */ } else { buf = (unsigned char *)dd->port->log_buf; if (buf[259] & 0x1) { dev_info(&dd->pdev->dev, "Write protect bit is set.\n"); set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag); } if (buf[288] == 0xF7) { dev_info(&dd->pdev->dev, "Exceeded Tmax, drive in thermal shutdown.\n"); set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag); } if (buf[288] == 0xBF) { dev_info(&dd->pdev->dev, "Drive indicates rebuild has failed.\n"); /* TODO */ } } /* get write protect progess */ memset(&attr242, 0, sizeof(struct smart_attr)); if (mtip_get_smart_attr(dd->port, 242, &attr242)) dev_warn(&dd->pdev->dev, "Unable to check write protect progress\n"); else dev_info(&dd->pdev->dev, "Write protect progress: %d%% (%d blocks)\n", attr242.cur, attr242.data); return rv; out3: del_timer_sync(&dd->port->cmd_timer); /* Disable interrupts on the HBA. */ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, dd->mmio + HOST_CTL); /*Release the IRQ. 
*/ devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); out2: mtip_deinit_port(dd->port); /* Free the command/command header memory. */ dmam_free_coherent(&dd->pdev->dev, HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4), dd->port->command_list, dd->port->command_list_dma); out1: /* Free the memory allocated for the for structure. */ kfree(dd->port); return rv; } /* * Called to deinitialize an interface. * * @dd Pointer to the driver data structure. * * return value * 0 */ static int mtip_hw_exit(struct driver_data *dd) { /* * Send standby immediate (E0h) to the drive so that it * saves its state. */ if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) if (mtip_standby_immediate(dd->port)) dev_warn(&dd->pdev->dev, "STANDBY IMMEDIATE failed\n"); /* de-initialize the port. */ mtip_deinit_port(dd->port); /* Disable interrupts on the HBA. */ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, dd->mmio + HOST_CTL); } del_timer_sync(&dd->port->cmd_timer); /* Release the IRQ. */ devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); /* Stop the bottom half tasklet. */ tasklet_kill(&dd->tasklet); /* Free the command/command header memory. */ dmam_free_coherent(&dd->pdev->dev, HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4), dd->port->command_list, dd->port->command_list_dma); /* Free the memory allocated for the for structure. */ kfree(dd->port); return 0; } /* * Issue a Standby Immediate command to the device. * * This function is called by the Block Layer just before the * system powers off during a shutdown. * * @dd Pointer to the driver data structure. * * return value * 0 */ static int mtip_hw_shutdown(struct driver_data *dd) { /* * Send standby immediate (E0h) to the drive so that it * saves its state. */ mtip_standby_immediate(dd->port); return 0; } /* * Suspend function * * This function is called by the Block Layer just before the * system hibernates. * * @dd Pointer to the driver data structure. 
* * return value * 0 Suspend was successful * -EFAULT Suspend was not successful */ static int mtip_hw_suspend(struct driver_data *dd) { /* * Send standby immediate (E0h) to the drive * so that it saves its state. */ if (mtip_standby_immediate(dd->port) != 0) { dev_err(&dd->pdev->dev, "Failed standby-immediate command\n"); return -EFAULT; } /* Disable interrupts on the HBA.*/ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, dd->mmio + HOST_CTL); mtip_deinit_port(dd->port); return 0; } /* * Resume function * * This function is called by the Block Layer as the * system resumes. * * @dd Pointer to the driver data structure. * * return value * 0 Resume was successful * -EFAULT Resume was not successful */ static int mtip_hw_resume(struct driver_data *dd) { /* Perform any needed hardware setup steps */ hba_setup(dd); /* Reset the HBA */ if (mtip_hba_reset(dd) != 0) { dev_err(&dd->pdev->dev, "Unable to reset the HBA\n"); return -EFAULT; } /* * Enable the port, DMA engine, and FIS reception specific * h/w in controller. */ mtip_init_port(dd->port); mtip_start_port(dd->port); /* Enable interrupts on the HBA.*/ writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, dd->mmio + HOST_CTL); return 0; } /* * Helper function for reusing disk name * upon hot insertion. */ static int rssd_disk_name_format(char *prefix, int index, char *buf, int buflen) { const int base = 'z' - 'a' + 1; char *begin = buf + strlen(prefix); char *end = buf + buflen; char *p; int unit; p = end - 1; *p = '\0'; unit = base; do { if (p == begin) return -EINVAL; *--p = 'a' + (index % unit); index = (index / unit) - 1; } while (index >= 0); memmove(begin, p, end - p); memcpy(buf, prefix, strlen(prefix)); return 0; } /* * Block layer IOCTL handler. * * @dev Pointer to the block_device structure. * @mode ignored * @cmd IOCTL command passed from the user application. * @arg Argument passed from the user application. * * return value * 0 IOCTL completed successfully. 
* -ENOTTY IOCTL not supported or invalid driver data * structure pointer. */ static int mtip_block_ioctl(struct block_device *dev, fmode_t mode, unsigned cmd, unsigned long arg) { struct driver_data *dd = dev->bd_disk->private_data; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!dd) return -ENOTTY; if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) return -ENOTTY; switch (cmd) { case BLKFLSBUF: return -ENOTTY; default: return mtip_hw_ioctl(dd, cmd, arg); } } #ifdef CONFIG_COMPAT /* * Block layer compat IOCTL handler. * * @dev Pointer to the block_device structure. * @mode ignored * @cmd IOCTL command passed from the user application. * @arg Argument passed from the user application. * * return value * 0 IOCTL completed successfully. * -ENOTTY IOCTL not supported or invalid driver data * structure pointer. */ static int mtip_block_compat_ioctl(struct block_device *dev, fmode_t mode, unsigned cmd, unsigned long arg) { struct driver_data *dd = dev->bd_disk->private_data; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!dd) return -ENOTTY; if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) return -ENOTTY; switch (cmd) { case BLKFLSBUF: return -ENOTTY; case HDIO_DRIVE_TASKFILE: { struct mtip_compat_ide_task_request_s __user *compat_req_task; ide_task_request_t req_task; int compat_tasksize, outtotal, ret; compat_tasksize = sizeof(struct mtip_compat_ide_task_request_s); compat_req_task = (struct mtip_compat_ide_task_request_s __user *) arg; if (copy_from_user(&req_task, (void __user *) arg, compat_tasksize - (2 * sizeof(compat_long_t)))) return -EFAULT; if (get_user(req_task.out_size, &compat_req_task->out_size)) return -EFAULT; if (get_user(req_task.in_size, &compat_req_task->in_size)) return -EFAULT; outtotal = sizeof(struct mtip_compat_ide_task_request_s); ret = exec_drive_taskfile(dd, (void __user *) arg, &req_task, outtotal); if (copy_to_user((void __user *) arg, &req_task, compat_tasksize - (2 * sizeof(compat_long_t)))) return 
-EFAULT; if (put_user(req_task.out_size, &compat_req_task->out_size)) return -EFAULT; if (put_user(req_task.in_size, &compat_req_task->in_size)) return -EFAULT; return ret; } default: return mtip_hw_ioctl(dd, cmd, arg); } } #endif /* * Obtain the geometry of the device. * * You may think that this function is obsolete, but some applications, * fdisk for example still used CHS values. This function describes the * device as having 224 heads and 56 sectors per cylinder. These values are * chosen so that each cylinder is aligned on a 4KB boundary. Since a * partition is described in terms of a start and end cylinder this means * that each partition is also 4KB aligned. Non-aligned partitions adversely * affects performance. * * @dev Pointer to the block_device strucutre. * @geo Pointer to a hd_geometry structure. * * return value * 0 Operation completed successfully. * -ENOTTY An error occurred while reading the drive capacity. */ static int mtip_block_getgeo(struct block_device *dev, struct hd_geometry *geo) { struct driver_data *dd = dev->bd_disk->private_data; sector_t capacity; if (!dd) return -ENOTTY; if (!(mtip_hw_get_capacity(dd, &capacity))) { dev_warn(&dd->pdev->dev, "Could not get drive capacity.\n"); return -ENOTTY; } geo->heads = 224; geo->sectors = 56; sector_div(capacity, (geo->heads * geo->sectors)); geo->cylinders = capacity; return 0; } /* * Block device operation function. * * This structure contains pointers to the functions required by the block * layer. */ static const struct block_device_operations mtip_block_ops = { .ioctl = mtip_block_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mtip_block_compat_ioctl, #endif .getgeo = mtip_block_getgeo, .owner = THIS_MODULE }; /* * Block layer make request function. * * This function is called by the kernel to process a BIO for * the P320 device. * * @queue Pointer to the request queue. Unused other than to obtain * the driver data structure. * @bio Pointer to the BIO. 
* */ static void mtip_make_request(struct request_queue *queue, struct bio *bio) { struct driver_data *dd = queue->queuedata; struct scatterlist *sg; struct bio_vec *bvec; int nents = 0; int tag = 0; if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { bio_endio(bio, -ENXIO); return; } if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) { bio_endio(bio, -ENODATA); return; } if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) && bio_data_dir(bio))) { bio_endio(bio, -ENODATA); return; } } if (unlikely(!bio_has_data(bio))) { blk_queue_flush(queue, 0); bio_endio(bio, 0); return; } sg = mtip_hw_get_scatterlist(dd, &tag); if (likely(sg != NULL)) { blk_queue_bounce(queue, &bio); if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) { dev_warn(&dd->pdev->dev, "Maximum number of SGL entries exceeded\n"); bio_io_error(bio); mtip_hw_release_scatterlist(dd, tag); return; } /* Create the scatter list for this bio. */ bio_for_each_segment(bvec, bio, nents) { sg_set_page(&sg[nents], bvec->bv_page, bvec->bv_len, bvec->bv_offset); } /* Issue the read/write. */ mtip_hw_submit_io(dd, bio->bi_sector, bio_sectors(bio), nents, tag, bio_endio, bio, bio_data_dir(bio)); } else bio_io_error(bio); } /* * Block layer initialization function. * * This function is called once by the PCI layer for each P320 * device that is connected to the system. * * @dd Pointer to the driver data structure. * * return value * 0 on success else an error code. */ static int mtip_block_initialize(struct driver_data *dd) { int rv = 0, wait_for_rebuild = 0; sector_t capacity; unsigned int index = 0; struct kobject *kobj; unsigned char thd_name[16]; if (dd->disk) goto skip_create_disk; /* hw init done, before rebuild */ /* Initialize the protocol layer. 
*/ wait_for_rebuild = mtip_hw_init(dd); if (wait_for_rebuild < 0) { dev_err(&dd->pdev->dev, "Protocol layer initialization failed\n"); rv = -EINVAL; goto protocol_init_error; } dd->disk = alloc_disk(MTIP_MAX_MINORS); if (dd->disk == NULL) { dev_err(&dd->pdev->dev, "Unable to allocate gendisk structure\n"); rv = -EINVAL; goto alloc_disk_error; } /* Generate the disk name, implemented same as in sd.c */ do { if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) goto ida_get_error; spin_lock(&rssd_index_lock); rv = ida_get_new(&rssd_index_ida, &index); spin_unlock(&rssd_index_lock); } while (rv == -EAGAIN); if (rv) goto ida_get_error; rv = rssd_disk_name_format("rssd", index, dd->disk->disk_name, DISK_NAME_LEN); if (rv) goto disk_index_error; dd->disk->driverfs_dev = &dd->pdev->dev; dd->disk->major = dd->major; dd->disk->first_minor = dd->instance * MTIP_MAX_MINORS; dd->disk->fops = &mtip_block_ops; dd->disk->private_data = dd; dd->index = index; /* * if rebuild pending, start the service thread, and delay the block * queue creation and add_disk() */ if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) goto start_service_thread; skip_create_disk: /* Allocate the request queue. */ dd->queue = blk_alloc_queue(GFP_KERNEL); if (dd->queue == NULL) { dev_err(&dd->pdev->dev, "Unable to allocate request queue\n"); rv = -ENOMEM; goto block_queue_alloc_init_error; } /* Attach our request function to the request queue. */ blk_queue_make_request(dd->queue, mtip_make_request); dd->disk->queue = dd->queue; dd->queue->queuedata = dd; /* Set device limits. */ set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags); blk_queue_max_segments(dd->queue, MTIP_MAX_SG); blk_queue_physical_block_size(dd->queue, 4096); blk_queue_io_min(dd->queue, 4096); /* * write back cache is not supported in the device. FUA depends on * write back cache support, hence setting flush support to zero. */ blk_queue_flush(dd->queue, 0); /* Set the capacity of the device in 512 byte sectors. 
*/ if (!(mtip_hw_get_capacity(dd, &capacity))) { dev_warn(&dd->pdev->dev, "Could not read drive capacity\n"); rv = -EIO; goto read_capacity_error; } set_capacity(dd->disk, capacity); /* Enable the block device and add it to /dev */ add_disk(dd->disk); /* * Now that the disk is active, initialize any sysfs attributes * managed by the protocol layer. */ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); if (kobj) { mtip_hw_sysfs_init(dd, kobj); kobject_put(kobj); } if (dd->mtip_svc_handler) { set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); return rv; /* service thread created for handling rebuild */ } start_service_thread: sprintf(thd_name, "mtip_svc_thd_%02d", index); dd->mtip_svc_handler = kthread_run(mtip_service_thread, dd, thd_name); if (IS_ERR(dd->mtip_svc_handler)) { dev_err(&dd->pdev->dev, "service thread failed to start\n"); dd->mtip_svc_handler = NULL; rv = -EFAULT; goto kthread_run_error; } if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) rv = wait_for_rebuild; return rv; kthread_run_error: /* Delete our gendisk. This also removes the device from /dev */ del_gendisk(dd->disk); read_capacity_error: blk_cleanup_queue(dd->queue); block_queue_alloc_init_error: disk_index_error: spin_lock(&rssd_index_lock); ida_remove(&rssd_index_ida, index); spin_unlock(&rssd_index_lock); ida_get_error: put_disk(dd->disk); alloc_disk_error: mtip_hw_exit(dd); /* De-initialize the protocol layer. */ protocol_init_error: return rv; } /* * Block layer deinitialization function. * * Called by the PCI layer as each P320 device is removed. * * @dd Pointer to the driver data structure. 
* * return value * 0 */ static int mtip_block_remove(struct driver_data *dd) { struct kobject *kobj; if (dd->mtip_svc_handler) { set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); wake_up_interruptible(&dd->port->svc_wait); kthread_stop(dd->mtip_svc_handler); } /* Clean up the sysfs attributes, if created */ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) { kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); if (kobj) { mtip_hw_sysfs_exit(dd, kobj); kobject_put(kobj); } } /* * Delete our gendisk structure. This also removes the device * from /dev */ del_gendisk(dd->disk); spin_lock(&rssd_index_lock); ida_remove(&rssd_index_ida, dd->index); spin_unlock(&rssd_index_lock); blk_cleanup_queue(dd->queue); dd->disk = NULL; dd->queue = NULL; /* De-initialize the protocol layer. */ mtip_hw_exit(dd); return 0; } /* * Function called by the PCI layer when just before the * machine shuts down. * * If a protocol layer shutdown function is present it will be called * by this function. * * @dd Pointer to the driver data structure. * * return value * 0 */ static int mtip_block_shutdown(struct driver_data *dd) { dev_info(&dd->pdev->dev, "Shutting down %s ...\n", dd->disk->disk_name); /* Delete our gendisk structure, and cleanup the blk queue. */ del_gendisk(dd->disk); spin_lock(&rssd_index_lock); ida_remove(&rssd_index_ida, dd->index); spin_unlock(&rssd_index_lock); blk_cleanup_queue(dd->queue); dd->disk = NULL; dd->queue = NULL; mtip_hw_shutdown(dd); return 0; } static int mtip_block_suspend(struct driver_data *dd) { dev_info(&dd->pdev->dev, "Suspending %s ...\n", dd->disk->disk_name); mtip_hw_suspend(dd); return 0; } static int mtip_block_resume(struct driver_data *dd) { dev_info(&dd->pdev->dev, "Resuming %s ...\n", dd->disk->disk_name); mtip_hw_resume(dd); return 0; } /* * Called for each supported PCI device detected. * * This function allocates the private data structure, enables the * PCI device and then calls the block layer initialization function. 
* * return value * 0 on success else an error code. */ static int mtip_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int rv = 0; struct driver_data *dd = NULL; /* Allocate memory for this devices private data. */ dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL); if (dd == NULL) { dev_err(&pdev->dev, "Unable to allocate memory for driver data\n"); return -ENOMEM; } /* Attach the private data to this PCI device. */ pci_set_drvdata(pdev, dd); rv = pcim_enable_device(pdev); if (rv < 0) { dev_err(&pdev->dev, "Unable to enable device\n"); goto iomap_err; } /* Map BAR5 to memory. */ rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME); if (rv < 0) { dev_err(&pdev->dev, "Unable to map regions\n"); goto iomap_err; } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rv) { rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rv) { dev_warn(&pdev->dev, "64-bit DMA enable failed\n"); goto setmask_err; } } } pci_set_master(pdev); if (pci_enable_msi(pdev)) { dev_warn(&pdev->dev, "Unable to enable MSI interrupt.\n"); goto block_initialize_err; } /* Copy the info we may need later into the private data structure. */ dd->major = mtip_major; dd->instance = instance; dd->pdev = pdev; /* Initialize the block layer. */ rv = mtip_block_initialize(dd); if (rv < 0) { dev_err(&pdev->dev, "Unable to initialize block layer\n"); goto block_initialize_err; } /* * Increment the instance count so that each device has a unique * instance number. */ instance++; if (rv != MTIP_FTL_REBUILD_MAGIC) set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); goto done; block_initialize_err: pci_disable_msi(pdev); setmask_err: pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); iomap_err: kfree(dd); pci_set_drvdata(pdev, NULL); return rv; done: return rv; } /* * Called for each probed device when the device is removed or the * driver is unloaded. 
* * return value * None */ static void mtip_pci_remove(struct pci_dev *pdev) { struct driver_data *dd = pci_get_drvdata(pdev); int counter = 0; set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); if (mtip_check_surprise_removal(pdev)) { while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { counter++; msleep(20); if (counter == 10) { /* Cleanup the outstanding commands */ mtip_command_cleanup(dd); break; } } } /* Clean up the block layer. */ mtip_block_remove(dd); pci_disable_msi(pdev); kfree(dd); pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); } /* * Called for each probed device when the device is suspended. * * return value * 0 Success * <0 Error */ static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) { int rv = 0; struct driver_data *dd = pci_get_drvdata(pdev); if (!dd) { dev_err(&pdev->dev, "Driver private datastructure is NULL\n"); return -EFAULT; } set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); /* Disable ports & interrupts then send standby immediate */ rv = mtip_block_suspend(dd); if (rv < 0) { dev_err(&pdev->dev, "Failed to suspend controller\n"); return rv; } /* * Save the pci config space to pdev structure & * disable the device */ pci_save_state(pdev); pci_disable_device(pdev); /* Move to Low power state*/ pci_set_power_state(pdev, PCI_D3hot); return rv; } /* * Called for each probed device when the device is resumed. 
* * return value * 0 Success * <0 Error */ static int mtip_pci_resume(struct pci_dev *pdev) { int rv = 0; struct driver_data *dd; dd = pci_get_drvdata(pdev); if (!dd) { dev_err(&pdev->dev, "Driver private datastructure is NULL\n"); return -EFAULT; } /* Move the device to active State */ pci_set_power_state(pdev, PCI_D0); /* Restore PCI configuration space */ pci_restore_state(pdev); /* Enable the PCI device*/ rv = pcim_enable_device(pdev); if (rv < 0) { dev_err(&pdev->dev, "Failed to enable card during resume\n"); goto err; } pci_set_master(pdev); /* * Calls hbaReset, initPort, & startPort function * then enables interrupts */ rv = mtip_block_resume(dd); if (rv < 0) dev_err(&pdev->dev, "Unable to resume\n"); err: clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); return rv; } /* * Shutdown routine * * return value * None */ static void mtip_pci_shutdown(struct pci_dev *pdev) { struct driver_data *dd = pci_get_drvdata(pdev); if (dd) mtip_block_shutdown(dd); } /* Table of device ids supported by this driver. */ static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) }, { 0 } }; /* Structure that describes the PCI driver functions. */ static struct pci_driver mtip_pci_driver = { .name = MTIP_DRV_NAME, .id_table = mtip_pci_tbl, .probe = mtip_pci_probe, .remove = mtip_pci_remove, .suspend = mtip_pci_suspend, .resume = mtip_pci_resume, .shutdown = mtip_pci_shutdown, }; MODULE_DEVICE_TABLE(pci, mtip_pci_tbl); /* * Module initialization function. * * Called once when the module is loaded. This function allocates a major * block device number to the Cyclone devices and registers the PCI layer * of the driver. * * Return value * 0 on success else error code. */ static int __init mtip_init(void) { int error; printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); /* Allocate a major block device number to use with this driver. 
*/ error = register_blkdev(0, MTIP_DRV_NAME); if (error <= 0) { printk(KERN_ERR "Unable to register block device (%d)\n", error); return -EBUSY; } mtip_major = error; /* Register our PCI operations. */ error = pci_register_driver(&mtip_pci_driver); if (error) unregister_blkdev(mtip_major, MTIP_DRV_NAME); return error; } /* * Module de-initialization function. * * Called once when the module is unloaded. This function deallocates * the major block device number allocated by mtip_init() and * unregisters the PCI layer of the driver. * * Return value * none */ static void __exit mtip_exit(void) { /* Release the allocated major block device number. */ unregister_blkdev(mtip_major, MTIP_DRV_NAME); /* Unregister the PCI driver. */ pci_unregister_driver(&mtip_pci_driver); } MODULE_AUTHOR("Micron Technology, Inc"); MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(MTIP_DRV_VERSION); module_init(mtip_init); module_exit(mtip_exit);
gpl-2.0
josalaito/android_kernel_motorola_msm8226
arch/arm/kernel/kprobes.c
4995
16879
/* * arch/arm/kernel/kprobes.c * * Kprobes on ARM * * Abhishek Sagar <sagar.abhishek@gmail.com> * Copyright (C) 2006, 2007 Motorola Inc. * * Nicolas Pitre <nico@marvell.com> * Copyright (C) 2007 Marvell Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/stop_machine.h> #include <linux/stringify.h> #include <asm/traps.h> #include <asm/cacheflush.h> #include "kprobes.h" #include "patch.h" #define MIN_STACK_SIZE(addr) \ min((unsigned long)MAX_STACK_SIZE, \ (unsigned long)current_thread_info() + THREAD_START_SP - (addr)) #define flush_insns(addr, size) \ flush_icache_range((unsigned long)(addr), \ (unsigned long)(addr) + \ (size)) /* Used as a marker in ARM_pc to note when we're in a jprobe. 
*/ #define JPROBE_MAGIC_ADDR 0xffffffff DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); int __kprobes arch_prepare_kprobe(struct kprobe *p) { kprobe_opcode_t insn; kprobe_opcode_t tmp_insn[MAX_INSN_SIZE]; unsigned long addr = (unsigned long)p->addr; bool thumb; kprobe_decode_insn_t *decode_insn; int is; if (in_exception_text(addr)) return -EINVAL; #ifdef CONFIG_THUMB2_KERNEL thumb = true; addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */ insn = ((u16 *)addr)[0]; if (is_wide_instruction(insn)) { insn <<= 16; insn |= ((u16 *)addr)[1]; decode_insn = thumb32_kprobe_decode_insn; } else decode_insn = thumb16_kprobe_decode_insn; #else /* !CONFIG_THUMB2_KERNEL */ thumb = false; if (addr & 0x3) return -EINVAL; insn = *p->addr; decode_insn = arm_kprobe_decode_insn; #endif p->opcode = insn; p->ainsn.insn = tmp_insn; switch ((*decode_insn)(insn, &p->ainsn)) { case INSN_REJECTED: /* not supported */ return -EINVAL; case INSN_GOOD: /* instruction uses slot */ p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) return -ENOMEM; for (is = 0; is < MAX_INSN_SIZE; ++is) p->ainsn.insn[is] = tmp_insn[is]; flush_insns(p->ainsn.insn, sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE); p->ainsn.insn_fn = (kprobe_insn_fn_t *) ((uintptr_t)p->ainsn.insn | thumb); break; case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */ p->ainsn.insn = NULL; break; } return 0; } void __kprobes arch_arm_kprobe(struct kprobe *p) { unsigned int brkp; void *addr; if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) { /* Remove any Thumb flag */ addr = (void *)((uintptr_t)p->addr & ~1); if (is_wide_instruction(p->opcode)) brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION; else brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION; } else { kprobe_opcode_t insn = p->opcode; addr = p->addr; brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION; if (insn >= 0xe0000000) brkp |= 0xe0000000; /* Unconditional instruction */ else brkp |= insn & 0xf0000000; /* Copy condition from 
insn */ } patch_text(addr, brkp); } /* * The actual disarming is done here on each CPU and synchronized using * stop_machine. This synchronization is necessary on SMP to avoid removing * a probe between the moment the 'Undefined Instruction' exception is raised * and the moment the exception handler reads the faulting instruction from * memory. It is also needed to atomically set the two half-words of a 32-bit * Thumb breakpoint. */ int __kprobes __arch_disarm_kprobe(void *p) { struct kprobe *kp = p; void *addr = (void *)((uintptr_t)kp->addr & ~1); __patch_text(addr, kp->opcode); return 0; } void __kprobes arch_disarm_kprobe(struct kprobe *p) { stop_machine(__arch_disarm_kprobe, p, cpu_online_mask); } void __kprobes arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { free_insn_slot(p->ainsn.insn, 0); p->ainsn.insn = NULL; } } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; } static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; kcb->kprobe_status = kcb->prev_kprobe.status; } static void __kprobes set_current_kprobe(struct kprobe *p) { __get_cpu_var(current_kprobe) = p; } static void __kprobes singlestep_skip(struct kprobe *p, struct pt_regs *regs) { #ifdef CONFIG_THUMB2_KERNEL regs->ARM_cpsr = it_advance(regs->ARM_cpsr); if (is_wide_instruction(p->opcode)) regs->ARM_pc += 4; else regs->ARM_pc += 2; #else regs->ARM_pc += 4; #endif } static inline void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { p->ainsn.insn_singlestep(p, regs); } /* * Called with IRQs disabled. IRQs must remain disabled from that point * all the way until processing this kprobe is complete. 
The current * kprobes implementation cannot process more than one nested level of * kprobe, and that level is reserved for user kprobe handlers, so we can't * risk encountering a new kprobe in an interrupt handler. */ void __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p, *cur; struct kprobe_ctlblk *kcb; kcb = get_kprobe_ctlblk(); cur = kprobe_running(); #ifdef CONFIG_THUMB2_KERNEL /* * First look for a probe which was registered using an address with * bit 0 set, this is the usual situation for pointers to Thumb code. * If not found, fallback to looking for one with bit 0 clear. */ p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1)); if (!p) p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc); #else /* ! CONFIG_THUMB2_KERNEL */ p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc); #endif if (p) { if (cur) { /* Kprobe is pending, so we're recursing. */ switch (kcb->kprobe_status) { case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: /* A pre- or post-handler probe got us here. */ kprobes_inc_nmissed_count(p); save_previous_kprobe(kcb); set_current_kprobe(p); kcb->kprobe_status = KPROBE_REENTER; singlestep(p, regs, kcb); restore_previous_kprobe(kcb); break; default: /* impossible cases */ BUG(); } } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) { /* Probe hit and conditional execution check ok. */ set_current_kprobe(p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; /* * If we have no pre-handler or it returned 0, we * continue with normal processing. If we have a * pre-handler and it returned non-zero, it prepped * for calling the break_handler below on re-entry, * so get out doing nothing more here. 
*/ if (!p->pre_handler || !p->pre_handler(p, regs)) { kcb->kprobe_status = KPROBE_HIT_SS; singlestep(p, regs, kcb); if (p->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; p->post_handler(p, regs, 0); } reset_current_kprobe(); } } else { /* * Probe hit but conditional execution check failed, * so just skip the instruction and continue as if * nothing had happened. */ singlestep_skip(p, regs); } } else if (cur) { /* We probably hit a jprobe. Call its break handler. */ if (cur->break_handler && cur->break_handler(cur, regs)) { kcb->kprobe_status = KPROBE_HIT_SS; singlestep(cur, regs, kcb); if (cur->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; cur->post_handler(cur, regs, 0); } } reset_current_kprobe(); } else { /* * The probe was removed and a race is in progress. * There is nothing we can do about it. Let's restart * the instruction. By the time we can restart, the * real instruction will be there. */ } } static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr) { unsigned long flags; local_irq_save(flags); kprobe_handler(regs); local_irq_restore(flags); return 0; } int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); switch (kcb->kprobe_status) { case KPROBE_HIT_SS: case KPROBE_REENTER: /* * We are here because the instruction being single * stepped caused a page fault. We reset the current * kprobe and the PC to point back to the probe address * and allow the page fault handler to continue as a * normal page fault. */ regs->ARM_pc = (long)cur->addr; if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); } else { reset_current_kprobe(); } break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, * we can also use npre/npostfault count for accounting * these specific fault cases. 
*/ kprobes_inc_nmissed_count(cur); /* * We come here because instructions in the pre/post * handler caused the page_fault, this could happen * if handler tries to access user space by * copy_from_user(), get_user() etc. Let the * user-specified handler try to fix it. */ if (cur->fault_handler && cur->fault_handler(cur, regs, fsr)) return 1; break; default: break; } return 0; } int __kprobes kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { /* * notify_die() is currently never called on ARM, * so this callback is currently empty. */ return NOTIFY_DONE; } /* * When a retprobed function returns, trampoline_handler() is called, * calling the kretprobe's handler. We construct a struct pt_regs to * give a view of registers r0-r11 to the user return-handler. This is * not a complete pt_regs structure, but that should be plenty sufficient * for kretprobe handlers which should normally be interested in r0 only * anyway. */ void __naked __kprobes kretprobe_trampoline(void) { __asm__ __volatile__ ( "stmdb sp!, {r0 - r11} \n\t" "mov r0, sp \n\t" "bl trampoline_handler \n\t" "mov lr, r0 \n\t" "ldmia sp!, {r0 - r11} \n\t" #ifdef CONFIG_THUMB2_KERNEL "bx lr \n\t" #else "mov pc, lr \n\t" #endif : : : "memory"); } /* Called from kretprobe_trampoline */ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *node, *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with a given * task either because multiple functions in the call path have * a return probe installed on them, and/or more than one return * probe was registered for a target function. 
* * We can handle this because: * - instances are always inserted at the head of the list * - when multiple return probes are registered for the same * function, the first instance's ret_addr will point to the * real return address, and all the rest will point to * kretprobe_trampoline */ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; if (ri->rp && ri->rp->handler) { __get_cpu_var(current_kprobe) = &ri->rp->kp; get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; ri->rp->handler(ri, regs); __get_cpu_var(current_kprobe) = NULL; } orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri, &empty_rp); if (orig_ret_address != trampoline_address) /* * This is the real return address. Any other * instances associated with this task are for * other calls deeper on the call stack */ break; } kretprobe_assert(ri, orig_ret_address, trampoline_address); kretprobe_hash_unlock(current, &flags); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } return (void *)orig_ret_address; } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr; /* Replace the return addr with trampoline addr. 
*/ regs->ARM_lr = (unsigned long)&kretprobe_trampoline; } int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); long sp_addr = regs->ARM_sp; long cpsr; kcb->jprobe_saved_regs = *regs; memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); regs->ARM_pc = (long)jp->entry; cpsr = regs->ARM_cpsr | PSR_I_BIT; #ifdef CONFIG_THUMB2_KERNEL /* Set correct Thumb state in cpsr */ if (regs->ARM_pc & 1) cpsr |= PSR_T_BIT; else cpsr &= ~PSR_T_BIT; #endif regs->ARM_cpsr = cpsr; preempt_disable(); return 1; } void __kprobes jprobe_return(void) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); __asm__ __volatile__ ( /* * Setup an empty pt_regs. Fill SP and PC fields as * they're needed by longjmp_break_handler. * * We allocate some slack between the original SP and start of * our fabricated regs. To be precise we want to have worst case * covered which is STMFD with all 16 regs so we allocate 2 * * sizeof(struct_pt_regs)). * * This is to prevent any simulated instruction from writing * over the regs when they are accessing the stack. */ #ifdef CONFIG_THUMB2_KERNEL "sub r0, %0, %1 \n\t" "mov sp, r0 \n\t" #else "sub sp, %0, %1 \n\t" #endif "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" "str %0, [sp, %2] \n\t" "str r0, [sp, %3] \n\t" "mov r0, sp \n\t" "bl kprobe_handler \n\t" /* * Return to the context saved by setjmp_pre_handler * and restored by longjmp_break_handler. */ #ifdef CONFIG_THUMB2_KERNEL "ldr lr, [sp, %2] \n\t" /* lr = saved sp */ "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */ "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */ "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */ /* rfe context */ "ldmia sp, {r0 - r12} \n\t" "mov sp, lr \n\t" "ldr lr, [sp], #4 \n\t" "rfeia sp! 
\n\t" #else "ldr r0, [sp, %4] \n\t" "msr cpsr_cxsf, r0 \n\t" "ldmia sp, {r0 - pc} \n\t" #endif : : "r" (kcb->jprobe_saved_regs.ARM_sp), "I" (sizeof(struct pt_regs) * 2), "J" (offsetof(struct pt_regs, ARM_sp)), "J" (offsetof(struct pt_regs, ARM_pc)), "J" (offsetof(struct pt_regs, ARM_cpsr)), "J" (offsetof(struct pt_regs, ARM_lr)) : "memory", "cc"); } int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); long stack_addr = kcb->jprobe_saved_regs.ARM_sp; long orig_sp = regs->ARM_sp; struct jprobe *jp = container_of(p, struct jprobe, kp); if (regs->ARM_pc == JPROBE_MAGIC_ADDR) { if (orig_sp != stack_addr) { struct pt_regs *saved_regs = (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp; printk("current sp %lx does not match saved sp %lx\n", orig_sp, stack_addr); printk("Saved registers for jprobe %p\n", jp); show_regs(saved_regs); printk("Current registers\n"); show_regs(regs); BUG(); } *regs = kcb->jprobe_saved_regs; memcpy((void *)stack_addr, kcb->jprobes_stack, MIN_STACK_SIZE(stack_addr)); preempt_enable_no_resched(); return 1; } return 0; } int __kprobes arch_trampoline_kprobe(struct kprobe *p) { return 0; } #ifdef CONFIG_THUMB2_KERNEL static struct undef_hook kprobes_thumb16_break_hook = { .instr_mask = 0xffff, .instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION, .cpsr_mask = MODE_MASK, .cpsr_val = SVC_MODE, .fn = kprobe_trap_handler, }; static struct undef_hook kprobes_thumb32_break_hook = { .instr_mask = 0xffffffff, .instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION, .cpsr_mask = MODE_MASK, .cpsr_val = SVC_MODE, .fn = kprobe_trap_handler, }; #else /* !CONFIG_THUMB2_KERNEL */ static struct undef_hook kprobes_arm_break_hook = { .instr_mask = 0x0fffffff, .instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION, .cpsr_mask = MODE_MASK, .cpsr_val = SVC_MODE, .fn = kprobe_trap_handler, }; #endif /* !CONFIG_THUMB2_KERNEL */ int __init arch_init_kprobes() { arm_kprobe_decode_init(); #ifdef CONFIG_THUMB2_KERNEL 
register_undef_hook(&kprobes_thumb16_break_hook); register_undef_hook(&kprobes_thumb32_break_hook); #else register_undef_hook(&kprobes_arm_break_hook); #endif return 0; }
gpl-2.0
kamarush/yuga_aosp_kernel_lp
drivers/scsi/mesh.c
7811
53907
/* * SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware) * bus adaptor found on Power Macintosh computers. * We assume the MESH is connected to a DBDMA (descriptor-based DMA) * controller. * * Paul Mackerras, August 1996. * Copyright (C) 1996 Paul Mackerras. * * Apr. 21 2002 - BenH Rework bus reset code for new error handler * Add delay after initial bus reset * Add module parameters * * Sep. 27 2003 - BenH Move to new driver model, fix some write posting * issues * To do: * - handle aborts correctly * - retry arbitration if lost (unless higher levels do this for us) * - power down the chip when no device is detected */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/string.h> #include <linux/blkdev.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/interrupt.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <asm/dbdma.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> #include <asm/irq.h> #include <asm/hydra.h> #include <asm/processor.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/pci-bridge.h> #include <asm/macio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "mesh.h" #if 1 #undef KERN_DEBUG #define KERN_DEBUG KERN_WARNING #endif MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)"); MODULE_DESCRIPTION("PowerMac MESH SCSI driver"); MODULE_LICENSE("GPL"); static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE; static int sync_targets = 0xff; static int resel_targets = 0xff; static int debug_targets = 0; /* print debug for these targets */ static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS; module_param(sync_rate, int, 0); MODULE_PARM_DESC(sync_rate, "Synchronous rate (0..10, 0=async)"); module_param(sync_targets, int, 0); MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to set synchronous"); 
module_param(resel_targets, int, 0); MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to set disconnect"); module_param(debug_targets, int, 0644); MODULE_PARM_DESC(debug_targets, "Bitmask of debugged targets"); module_param(init_reset_delay, int, 0); MODULE_PARM_DESC(init_reset_delay, "Initial bus reset delay (0=no reset)"); static int mesh_sync_period = 100; static int mesh_sync_offset = 0; static unsigned char use_active_neg = 0; /* bit mask for SEQ_ACTIVE_NEG if used */ #define ALLOW_SYNC(tgt) ((sync_targets >> (tgt)) & 1) #define ALLOW_RESEL(tgt) ((resel_targets >> (tgt)) & 1) #define ALLOW_DEBUG(tgt) ((debug_targets >> (tgt)) & 1) #define DEBUG_TARGET(cmd) ((cmd) && ALLOW_DEBUG((cmd)->device->id)) #undef MESH_DBG #define N_DBG_LOG 50 #define N_DBG_SLOG 20 #define NUM_DBG_EVENTS 13 #undef DBG_USE_TB /* bombs on 601 */ struct dbglog { char *fmt; u32 tb; u8 phase; u8 bs0; u8 bs1; u8 tgt; int d; }; enum mesh_phase { idle, arbitrating, selecting, commanding, dataing, statusing, busfreeing, disconnecting, reselecting, sleeping }; enum msg_phase { msg_none, msg_out, msg_out_xxx, msg_out_last, msg_in, msg_in_bad, }; enum sdtr_phase { do_sdtr, sdtr_sent, sdtr_done }; struct mesh_target { enum sdtr_phase sdtr_state; int sync_params; int data_goes_out; /* guess as to data direction */ struct scsi_cmnd *current_req; u32 saved_ptr; #ifdef MESH_DBG int log_ix; int n_log; struct dbglog log[N_DBG_LOG]; #endif }; struct mesh_state { volatile struct mesh_regs __iomem *mesh; int meshintr; volatile struct dbdma_regs __iomem *dma; int dmaintr; struct Scsi_Host *host; struct mesh_state *next; struct scsi_cmnd *request_q; struct scsi_cmnd *request_qtail; enum mesh_phase phase; /* what we're currently trying to do */ enum msg_phase msgphase; int conn_tgt; /* target we're connected to */ struct scsi_cmnd *current_req; /* req we're currently working on */ int data_ptr; int dma_started; int dma_count; int stat; int aborting; int expect_reply; int n_msgin; u8 msgin[16]; int 
n_msgout; int last_n_msgout; u8 msgout[16]; struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */ dma_addr_t dma_cmd_bus; void *dma_cmd_space; int dma_cmd_size; int clk_freq; struct mesh_target tgts[8]; struct macio_dev *mdev; struct pci_dev* pdev; #ifdef MESH_DBG int log_ix; int n_log; struct dbglog log[N_DBG_SLOG]; #endif }; /* * Driver is too messy, we need a few prototypes... */ static void mesh_done(struct mesh_state *ms, int start_next); static void mesh_interrupt(struct mesh_state *ms); static void cmd_complete(struct mesh_state *ms); static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd); static void halt_dma(struct mesh_state *ms); static void phase_mismatch(struct mesh_state *ms); /* * Some debugging & logging routines */ #ifdef MESH_DBG static inline u32 readtb(void) { u32 tb; #ifdef DBG_USE_TB /* Beware: if you enable this, it will crash on 601s. */ asm ("mftb %0" : "=r" (tb) : ); #else tb = 0; #endif return tb; } static void dlog(struct mesh_state *ms, char *fmt, int a) { struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; struct dbglog *tlp, *slp; tlp = &tp->log[tp->log_ix]; slp = &ms->log[ms->log_ix]; tlp->fmt = fmt; tlp->tb = readtb(); tlp->phase = (ms->msgphase << 4) + ms->phase; tlp->bs0 = ms->mesh->bus_status0; tlp->bs1 = ms->mesh->bus_status1; tlp->tgt = ms->conn_tgt; tlp->d = a; *slp = *tlp; if (++tp->log_ix >= N_DBG_LOG) tp->log_ix = 0; if (tp->n_log < N_DBG_LOG) ++tp->n_log; if (++ms->log_ix >= N_DBG_SLOG) ms->log_ix = 0; if (ms->n_log < N_DBG_SLOG) ++ms->n_log; } static void dumplog(struct mesh_state *ms, int t) { struct mesh_target *tp = &ms->tgts[t]; struct dbglog *lp; int i; if (tp->n_log == 0) return; i = tp->log_ix - tp->n_log; if (i < 0) i += N_DBG_LOG; tp->n_log = 0; do { lp = &tp->log[i]; printk(KERN_DEBUG "mesh log %d: bs=%.2x%.2x ph=%.2x ", t, lp->bs1, lp->bs0, lp->phase); #ifdef DBG_USE_TB printk("tb=%10u ", lp->tb); #endif printk(lp->fmt, lp->d); printk("\n"); if (++i >= N_DBG_LOG) i = 0; } while (i != 
tp->log_ix); } static void dumpslog(struct mesh_state *ms) { struct dbglog *lp; int i; if (ms->n_log == 0) return; i = ms->log_ix - ms->n_log; if (i < 0) i += N_DBG_SLOG; ms->n_log = 0; do { lp = &ms->log[i]; printk(KERN_DEBUG "mesh log: bs=%.2x%.2x ph=%.2x t%d ", lp->bs1, lp->bs0, lp->phase, lp->tgt); #ifdef DBG_USE_TB printk("tb=%10u ", lp->tb); #endif printk(lp->fmt, lp->d); printk("\n"); if (++i >= N_DBG_SLOG) i = 0; } while (i != ms->log_ix); } #else static inline void dlog(struct mesh_state *ms, char *fmt, int a) {} static inline void dumplog(struct mesh_state *ms, int tgt) {} static inline void dumpslog(struct mesh_state *ms) {} #endif /* MESH_DBG */ #define MKWORD(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) static void mesh_dump_regs(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; volatile struct dbdma_regs __iomem *md = ms->dma; int t; struct mesh_target *tp; printk(KERN_DEBUG "mesh: state at %p, regs at %p, dma at %p\n", ms, mr, md); printk(KERN_DEBUG " ct=%4x seq=%2x bs=%4x fc=%2x " "exc=%2x err=%2x im=%2x int=%2x sp=%2x\n", (mr->count_hi << 8) + mr->count_lo, mr->sequence, (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count, mr->exception, mr->error, mr->intr_mask, mr->interrupt, mr->sync_params); while(in_8(&mr->fifo_count)) printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo)); printk(KERN_DEBUG " dma stat=%x cmdptr=%x\n", in_le32(&md->status), in_le32(&md->cmdptr)); printk(KERN_DEBUG " phase=%d msgphase=%d conn_tgt=%d data_ptr=%d\n", ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr); printk(KERN_DEBUG " dma_st=%d dma_ct=%d n_msgout=%d\n", ms->dma_started, ms->dma_count, ms->n_msgout); for (t = 0; t < 8; ++t) { tp = &ms->tgts[t]; if (tp->current_req == NULL) continue; printk(KERN_DEBUG " target %d: req=%p goes_out=%d saved_ptr=%d\n", t, tp->current_req, tp->data_goes_out, tp->saved_ptr); } } /* * Flush write buffers on the bus path to the mesh */ static inline void mesh_flush_io(volatile struct 
mesh_regs __iomem *mr) { (void)in_8(&mr->mesh_id); } /* * Complete a SCSI command */ static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd) { (*cmd->scsi_done)(cmd); } /* Called with meshinterrupt disabled, initialize the chipset * and eventually do the initial bus reset. The lock must not be * held since we can schedule. */ static void mesh_init(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; volatile struct dbdma_regs __iomem *md = ms->dma; mesh_flush_io(mr); udelay(100); /* Reset controller */ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ out_8(&mr->exception, 0xff); /* clear all exception bits */ out_8(&mr->error, 0xff); /* clear all error bits */ out_8(&mr->sequence, SEQ_RESETMESH); mesh_flush_io(mr); udelay(10); out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->source_id, ms->host->this_id); out_8(&mr->sel_timeout, 25); /* 250ms */ out_8(&mr->sync_params, ASYNC_PARAMS); if (init_reset_delay) { printk(KERN_INFO "mesh: performing initial bus reset...\n"); /* Reset bus */ out_8(&mr->bus_status1, BS1_RST); /* assert RST */ mesh_flush_io(mr); udelay(30); /* leave it on for >= 25us */ out_8(&mr->bus_status1, 0); /* negate RST */ mesh_flush_io(mr); /* Wait for bus to come back */ msleep(init_reset_delay); } /* Reconfigure controller */ out_8(&mr->interrupt, 0xff); /* clear all interrupt bits */ out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); out_8(&mr->sync_params, ASYNC_PARAMS); out_8(&mr->sequence, SEQ_ENBRESEL); ms->phase = idle; ms->msgphase = msg_none; } static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd) { volatile struct mesh_regs __iomem *mr = ms->mesh; int t, id; id = cmd->device->id; ms->current_req = cmd; ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE; ms->tgts[id].current_req = cmd; #if 1 if (DEBUG_TARGET(cmd)) { int i; printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id); for (i = 0; i < 
cmd->cmd_len; ++i) printk(" %x", cmd->cmnd[i]); printk(" use_sg=%d buffer=%p bufflen=%u\n", scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd)); } #endif if (ms->dma_started) panic("mesh: double DMA start !\n"); ms->phase = arbitrating; ms->msgphase = msg_none; ms->data_ptr = 0; ms->dma_started = 0; ms->n_msgout = 0; ms->last_n_msgout = 0; ms->expect_reply = 0; ms->conn_tgt = id; ms->tgts[id].saved_ptr = 0; ms->stat = DID_OK; ms->aborting = 0; #ifdef MESH_DBG ms->tgts[id].n_log = 0; dlog(ms, "start cmd=%x", (int) cmd); #endif /* Off we go */ dlog(ms, "about to arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); out_8(&mr->interrupt, INT_CMDDONE); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(1); if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) { /* * Some other device has the bus or is arbitrating for it - * probably a target which is about to reselect us. */ dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); for (t = 100; t > 0; --t) { if ((in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) == 0) break; if (in_8(&mr->interrupt) != 0) { dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); mesh_interrupt(ms); if (ms->phase != arbitrating) return; } udelay(1); } if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) { /* XXX should try again in a little while */ ms->stat = DID_BUS_BUSY; ms->phase = idle; mesh_done(ms, 0); return; } } /* * Apparently the mesh has a bug where it will assert both its * own bit and the target's bit on the bus during arbitration. */ out_8(&mr->dest_id, mr->source_id); /* * There appears to be a race with reselection sometimes, * where a target reselects us just as we issue the * arbitrate command. It seems that then the arbitrate * command just hangs waiting for the bus to be free * without giving us a reselection exception. 
* The only way I have found to get it to respond correctly * is this: disable reselection before issuing the arbitrate * command, then after issuing it, if it looks like a target * is trying to reselect us, reset the mesh and then enable * reselection. */ out_8(&mr->sequence, SEQ_DISRESEL); if (in_8(&mr->interrupt) != 0) { dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); mesh_interrupt(ms); if (ms->phase != arbitrating) return; dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); } out_8(&mr->sequence, SEQ_ARBITRATE); for (t = 230; t > 0; --t) { if (in_8(&mr->interrupt) != 0) break; udelay(1); } dlog(ms, "after arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL) && (in_8(&mr->bus_status0) & BS0_IO)) { /* looks like a reselection - try resetting the mesh */ dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); out_8(&mr->sequence, SEQ_RESETMESH); mesh_flush_io(mr); udelay(10); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t) udelay(1); dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); #ifndef MESH_MULTIPLE_HOSTS if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL) && (in_8(&mr->bus_status0) & BS0_IO)) { printk(KERN_ERR "mesh: controller not responding" " to reselection!\n"); /* * If this is a target reselecting us, and the * mesh isn't responding, the higher levels of * the scsi code will eventually time out and * reset the bus. */ } #endif } } /* * Start the next command for a MESH. 
* Should be called with interrupts disabled. */ static void mesh_start(struct mesh_state *ms) { struct scsi_cmnd *cmd, *prev, *next; if (ms->phase != idle || ms->current_req != NULL) { printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)", ms->phase, ms); return; } while (ms->phase == idle) { prev = NULL; for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) { if (cmd == NULL) return; if (ms->tgts[cmd->device->id].current_req == NULL) break; prev = cmd; } next = (struct scsi_cmnd *) cmd->host_scribble; if (prev == NULL) ms->request_q = next; else prev->host_scribble = (void *) next; if (next == NULL) ms->request_qtail = prev; mesh_start_cmd(ms, cmd); } } static void mesh_done(struct mesh_state *ms, int start_next) { struct scsi_cmnd *cmd; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; cmd = ms->current_req; ms->current_req = NULL; tp->current_req = NULL; if (cmd) { cmd->result = (ms->stat << 16) + cmd->SCp.Status; if (ms->stat == DID_OK) cmd->result += (cmd->SCp.Message << 8); if (DEBUG_TARGET(cmd)) { printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n", cmd->result, ms->data_ptr, scsi_bufflen(cmd)); #if 0 /* needs to use sg? */ if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3) && cmd->request_buffer != 0) { unsigned char *b = cmd->request_buffer; printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n", b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); } #endif } cmd->SCp.this_residual -= ms->data_ptr; mesh_completed(ms, cmd); } if (start_next) { out_8(&ms->mesh->sequence, SEQ_ENBRESEL); mesh_flush_io(ms->mesh); udelay(1); ms->phase = idle; mesh_start(ms); } } static inline void add_sdtr_msg(struct mesh_state *ms) { int i = ms->n_msgout; ms->msgout[i] = EXTENDED_MESSAGE; ms->msgout[i+1] = 3; ms->msgout[i+2] = EXTENDED_SDTR; ms->msgout[i+3] = mesh_sync_period/4; ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)? 
mesh_sync_offset: 0); ms->n_msgout = i + 5; } static void set_sdtr(struct mesh_state *ms, int period, int offset) { struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; volatile struct mesh_regs __iomem *mr = ms->mesh; int v, tr; tp->sdtr_state = sdtr_done; if (offset == 0) { /* asynchronous */ if (SYNC_OFF(tp->sync_params)) printk(KERN_INFO "mesh: target %d now asynchronous\n", ms->conn_tgt); tp->sync_params = ASYNC_PARAMS; out_8(&mr->sync_params, ASYNC_PARAMS); return; } /* * We need to compute ceil(clk_freq * period / 500e6) - 2 * without incurring overflow. */ v = (ms->clk_freq / 5000) * period; if (v <= 250000) { /* special case: sync_period == 5 * clk_period */ v = 0; /* units of tr are 100kB/s */ tr = (ms->clk_freq + 250000) / 500000; } else { /* sync_period == (v + 2) * 2 * clk_period */ v = (v + 99999) / 100000 - 2; if (v > 15) v = 15; /* oops */ tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000; } if (offset > 15) offset = 15; /* can't happen */ tp->sync_params = SYNC_PARAMS(offset, v); out_8(&mr->sync_params, tp->sync_params); printk(KERN_INFO "mesh: target %d synchronous at %d.%d MB/s\n", ms->conn_tgt, tr/10, tr%10); } static void start_phase(struct mesh_state *ms) { int i, seq, nb; volatile struct mesh_regs __iomem *mr = ms->mesh; volatile struct dbdma_regs __iomem *md = ms->dma; struct scsi_cmnd *cmd = ms->current_req; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; dlog(ms, "start_phase nmo/exc/fc/seq = %.8x", MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence)); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0); switch (ms->msgphase) { case msg_none: break; case msg_in: out_8(&mr->count_hi, 0); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGIN + seq); ms->n_msgin = 0; return; case msg_out: /* * To make sure ATN drops before we assert ACK for * the last byte of the message, we have to do the * last byte specially. 
*/ if (ms->n_msgout <= 0) { printk(KERN_ERR "mesh: msg_out but n_msgout=%d\n", ms->n_msgout); mesh_dump_regs(ms); ms->msgphase = msg_none; break; } if (ALLOW_DEBUG(ms->conn_tgt)) { printk(KERN_DEBUG "mesh: sending %d msg bytes:", ms->n_msgout); for (i = 0; i < ms->n_msgout; ++i) printk(" %x", ms->msgout[i]); printk("\n"); } dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0], ms->msgout[1], ms->msgout[2])); out_8(&mr->count_hi, 0); out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); /* * If ATN is not already asserted, we assert it, then * issue a SEQ_MSGOUT to get the mesh to drop ACK. */ if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) { dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0); out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */ mesh_flush_io(mr); udelay(1); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGOUT + seq); out_8(&mr->bus_status0, 0); /* release explicit ATN */ dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0); } if (ms->n_msgout == 1) { /* * We can't issue the SEQ_MSGOUT without ATN * until the target has asserted REQ. The logic * in cmd_complete handles both situations: * REQ already asserted or not. 
*/ cmd_complete(ms); } else { out_8(&mr->count_lo, ms->n_msgout - 1); out_8(&mr->sequence, SEQ_MSGOUT + seq); for (i = 0; i < ms->n_msgout - 1; ++i) out_8(&mr->fifo, ms->msgout[i]); } return; default: printk(KERN_ERR "mesh bug: start_phase msgphase=%d\n", ms->msgphase); } switch (ms->phase) { case selecting: out_8(&mr->dest_id, ms->conn_tgt); out_8(&mr->sequence, SEQ_SELECT + SEQ_ATN); break; case commanding: out_8(&mr->sync_params, tp->sync_params); out_8(&mr->count_hi, 0); if (cmd) { out_8(&mr->count_lo, cmd->cmd_len); out_8(&mr->sequence, SEQ_COMMAND + seq); for (i = 0; i < cmd->cmd_len; ++i) out_8(&mr->fifo, cmd->cmnd[i]); } else { out_8(&mr->count_lo, 6); out_8(&mr->sequence, SEQ_COMMAND + seq); for (i = 0; i < 6; ++i) out_8(&mr->fifo, 0); } break; case dataing: /* transfer data, if any */ if (!ms->dma_started) { set_dma_cmds(ms, cmd); out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds)); out_le32(&md->control, (RUN << 16) | RUN); ms->dma_started = 1; } nb = ms->dma_count; if (nb > 0xfff0) nb = 0xfff0; ms->dma_count -= nb; ms->data_ptr += nb; out_8(&mr->count_lo, nb); out_8(&mr->count_hi, nb >> 8); out_8(&mr->sequence, (tp->data_goes_out? 
SEQ_DATAOUT: SEQ_DATAIN) + SEQ_DMA_MODE + seq); break; case statusing: out_8(&mr->count_hi, 0); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_STATUS + seq); break; case busfreeing: case disconnecting: out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(1); dlog(ms, "enbresel intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); out_8(&mr->sequence, SEQ_BUSFREE); break; default: printk(KERN_ERR "mesh: start_phase called with phase=%d\n", ms->phase); dumpslog(ms); } } static inline void get_msgin(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; int i, n; n = mr->fifo_count; if (n != 0) { i = ms->n_msgin; ms->n_msgin = i + n; for (; n > 0; --n) ms->msgin[i++] = in_8(&mr->fifo); } } static inline int msgin_length(struct mesh_state *ms) { int b, n; n = 1; if (ms->n_msgin > 0) { b = ms->msgin[0]; if (b == 1) { /* extended message */ n = ms->n_msgin < 2? 2: ms->msgin[1] + 2; } else if (0x20 <= b && b <= 0x2f) { /* 2-byte message */ n = 2; } } return n; } static void reselected(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; struct scsi_cmnd *cmd; struct mesh_target *tp; int b, t, prev; switch (ms->phase) { case idle: break; case arbitrating: if ((cmd = ms->current_req) != NULL) { /* put the command back on the queue */ cmd->host_scribble = (void *) ms->request_q; if (ms->request_q == NULL) ms->request_qtail = cmd; ms->request_q = cmd; tp = &ms->tgts[cmd->device->id]; tp->current_req = NULL; } break; case busfreeing: ms->phase = reselecting; mesh_done(ms, 0); break; case disconnecting: break; default: printk(KERN_ERR "mesh: reselected in phase %d/%d tgt %d\n", ms->msgphase, ms->phase, ms->conn_tgt); dumplog(ms, ms->conn_tgt); dumpslog(ms); } if (ms->dma_started) { printk(KERN_ERR "mesh: reselected with DMA started !\n"); halt_dma(ms); } ms->current_req = NULL; ms->phase = dataing; ms->msgphase = msg_in; ms->n_msgout = 0; ms->last_n_msgout = 0; prev = ms->conn_tgt; /* * We 
seem to get abortive reselections sometimes. */ while ((in_8(&mr->bus_status1) & BS1_BSY) == 0) { static int mesh_aborted_resels; mesh_aborted_resels++; out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); mesh_flush_io(mr); udelay(1); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(5); dlog(ms, "extra resel err/exc/fc = %.6x", MKWORD(0, mr->error, mr->exception, mr->fifo_count)); } out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); mesh_flush_io(mr); udelay(1); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(1); out_8(&mr->sync_params, ASYNC_PARAMS); /* * Find out who reselected us. */ if (in_8(&mr->fifo_count) == 0) { printk(KERN_ERR "mesh: reselection but nothing in fifo?\n"); ms->conn_tgt = ms->host->this_id; goto bogus; } /* get the last byte in the fifo */ do { b = in_8(&mr->fifo); dlog(ms, "reseldata %x", b); } while (in_8(&mr->fifo_count)); for (t = 0; t < 8; ++t) if ((b & (1 << t)) != 0 && t != ms->host->this_id) break; if (b != (1 << t) + (1 << ms->host->this_id)) { printk(KERN_ERR "mesh: bad reselection data %x\n", b); ms->conn_tgt = ms->host->this_id; goto bogus; } /* * Set up to continue with that target's transfer. 
*/ ms->conn_tgt = t; tp = &ms->tgts[t]; out_8(&mr->sync_params, tp->sync_params); if (ALLOW_DEBUG(t)) { printk(KERN_DEBUG "mesh: reselected by target %d\n", t); printk(KERN_DEBUG "mesh: saved_ptr=%x goes_out=%d cmd=%p\n", tp->saved_ptr, tp->data_goes_out, tp->current_req); } ms->current_req = tp->current_req; if (tp->current_req == NULL) { printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t); goto bogus; } ms->data_ptr = tp->saved_ptr; dlog(ms, "resel prev tgt=%d", prev); dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception)); start_phase(ms); return; bogus: dumplog(ms, ms->conn_tgt); dumpslog(ms); ms->data_ptr = 0; ms->aborting = 1; start_phase(ms); } static void do_abort(struct mesh_state *ms) { ms->msgout[0] = ABORT; ms->n_msgout = 1; ms->aborting = 1; ms->stat = DID_ABORT; dlog(ms, "abort", 0); } static void handle_reset(struct mesh_state *ms) { int tgt; struct mesh_target *tp; struct scsi_cmnd *cmd; volatile struct mesh_regs __iomem *mr = ms->mesh; for (tgt = 0; tgt < 8; ++tgt) { tp = &ms->tgts[tgt]; if ((cmd = tp->current_req) != NULL) { cmd->result = DID_RESET << 16; tp->current_req = NULL; mesh_completed(ms, cmd); } ms->tgts[tgt].sdtr_state = do_sdtr; ms->tgts[tgt].sync_params = ASYNC_PARAMS; } ms->current_req = NULL; while ((cmd = ms->request_q) != NULL) { ms->request_q = (struct scsi_cmnd *) cmd->host_scribble; cmd->result = DID_RESET << 16; mesh_completed(ms, cmd); } ms->phase = idle; ms->msgphase = msg_none; out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); out_8(&mr->sync_params, ASYNC_PARAMS); out_8(&mr->sequence, SEQ_ENBRESEL); } static irqreturn_t do_mesh_interrupt(int irq, void *dev_id) { unsigned long flags; struct mesh_state *ms = dev_id; struct Scsi_Host *dev = ms->host; spin_lock_irqsave(dev->host_lock, flags); mesh_interrupt(ms); spin_unlock_irqrestore(dev->host_lock, flags); return IRQ_HANDLED; } static void handle_error(struct 
mesh_state *ms) { int err, exc, count; volatile struct mesh_regs __iomem *mr = ms->mesh; err = in_8(&mr->error); exc = in_8(&mr->exception); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); dlog(ms, "error err/exc/fc/cl=%.8x", MKWORD(err, exc, mr->fifo_count, mr->count_lo)); if (err & ERR_SCSIRESET) { /* SCSI bus was reset */ printk(KERN_INFO "mesh: SCSI bus reset detected: " "waiting for end..."); while ((in_8(&mr->bus_status1) & BS1_RST) != 0) udelay(1); printk("done\n"); handle_reset(ms); /* request_q is empty, no point in mesh_start() */ return; } if (err & ERR_UNEXPDISC) { /* Unexpected disconnect */ if (exc & EXC_RESELECTED) { reselected(ms); return; } if (!ms->aborting) { printk(KERN_WARNING "mesh: target %d aborted\n", ms->conn_tgt); dumplog(ms, ms->conn_tgt); dumpslog(ms); } out_8(&mr->interrupt, INT_CMDDONE); ms->stat = DID_ABORT; mesh_done(ms, 1); return; } if (err & ERR_PARITY) { if (ms->msgphase == msg_in) { printk(KERN_ERR "mesh: msg parity error, target %d\n", ms->conn_tgt); ms->msgout[0] = MSG_PARITY_ERROR; ms->n_msgout = 1; ms->msgphase = msg_in_bad; cmd_complete(ms); return; } if (ms->stat == DID_OK) { printk(KERN_ERR "mesh: parity error, target %d\n", ms->conn_tgt); ms->stat = DID_PARITY; } count = (mr->count_hi << 8) + mr->count_lo; if (count == 0) { cmd_complete(ms); } else { /* reissue the data transfer command */ out_8(&mr->sequence, mr->sequence); } return; } if (err & ERR_SEQERR) { if (exc & EXC_RESELECTED) { /* This can happen if we issue a command to get the bus just after the target reselects us. 
*/ static int mesh_resel_seqerr; mesh_resel_seqerr++; reselected(ms); return; } if (exc == EXC_PHASEMM) { static int mesh_phasemm_seqerr; mesh_phasemm_seqerr++; phase_mismatch(ms); return; } printk(KERN_ERR "mesh: sequence error (err=%x exc=%x)\n", err, exc); } else { printk(KERN_ERR "mesh: unknown error %x (exc=%x)\n", err, exc); } mesh_dump_regs(ms); dumplog(ms, ms->conn_tgt); if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) { /* try to do what the target wants */ do_abort(ms); phase_mismatch(ms); return; } ms->stat = DID_ERROR; mesh_done(ms, 1); } static void handle_exception(struct mesh_state *ms) { int exc; volatile struct mesh_regs __iomem *mr = ms->mesh; exc = in_8(&mr->exception); out_8(&mr->interrupt, INT_EXCEPTION | INT_CMDDONE); if (exc & EXC_RESELECTED) { static int mesh_resel_exc; mesh_resel_exc++; reselected(ms); } else if (exc == EXC_ARBLOST) { printk(KERN_DEBUG "mesh: lost arbitration\n"); ms->stat = DID_BUS_BUSY; mesh_done(ms, 1); } else if (exc == EXC_SELTO) { /* selection timed out */ ms->stat = DID_BAD_TARGET; mesh_done(ms, 1); } else if (exc == EXC_PHASEMM) { /* target wants to do something different: find out what it wants and do it. 
*/ phase_mismatch(ms); } else { printk(KERN_ERR "mesh: can't cope with exception %x\n", exc); mesh_dump_regs(ms); dumplog(ms, ms->conn_tgt); do_abort(ms); phase_mismatch(ms); } } static void handle_msgin(struct mesh_state *ms) { int i, code; struct scsi_cmnd *cmd = ms->current_req; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; if (ms->n_msgin == 0) return; code = ms->msgin[0]; if (ALLOW_DEBUG(ms->conn_tgt)) { printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin); for (i = 0; i < ms->n_msgin; ++i) printk(" %x", ms->msgin[i]); printk("\n"); } dlog(ms, "msgin msg=%.8x", MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2])); ms->expect_reply = 0; ms->n_msgout = 0; if (ms->n_msgin < msgin_length(ms)) goto reject; if (cmd) cmd->SCp.Message = code; switch (code) { case COMMAND_COMPLETE: break; case EXTENDED_MESSAGE: switch (ms->msgin[2]) { case EXTENDED_MODIFY_DATA_POINTER: ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6] + (ms->msgin[4] << 16) + (ms->msgin[5] << 8); break; case EXTENDED_SDTR: if (tp->sdtr_state != sdtr_sent) { /* reply with an SDTR */ add_sdtr_msg(ms); /* limit period to at least his value, offset to no more than his */ if (ms->msgout[3] < ms->msgin[3]) ms->msgout[3] = ms->msgin[3]; if (ms->msgout[4] > ms->msgin[4]) ms->msgout[4] = ms->msgin[4]; set_sdtr(ms, ms->msgout[3], ms->msgout[4]); ms->msgphase = msg_out; } else { set_sdtr(ms, ms->msgin[3], ms->msgin[4]); } break; default: goto reject; } break; case SAVE_POINTERS: tp->saved_ptr = ms->data_ptr; break; case RESTORE_POINTERS: ms->data_ptr = tp->saved_ptr; break; case DISCONNECT: ms->phase = disconnecting; break; case ABORT: break; case MESSAGE_REJECT: if (tp->sdtr_state == sdtr_sent) set_sdtr(ms, 0, 0); break; case NOP: break; default: if (IDENTIFY_BASE <= code && code <= IDENTIFY_BASE + 7) { if (cmd == NULL) { do_abort(ms); ms->msgphase = msg_out; } else if (code != cmd->device->lun + IDENTIFY_BASE) { printk(KERN_WARNING "mesh: lun mismatch " "(%d != %d) on reselection from " "target 
%d\n", code - IDENTIFY_BASE, cmd->device->lun, ms->conn_tgt); } break; } goto reject; } return; reject: printk(KERN_WARNING "mesh: rejecting message from target %d:", ms->conn_tgt); for (i = 0; i < ms->n_msgin; ++i) printk(" %x", ms->msgin[i]); printk("\n"); ms->msgout[0] = MESSAGE_REJECT; ms->n_msgout = 1; ms->msgphase = msg_out; } /* * Set up DMA commands for transferring data. */ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd) { int i, dma_cmd, total, off, dtot; struct scatterlist *scl; struct dbdma_cmd *dcmds; dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out? OUTPUT_MORE: INPUT_MORE; dcmds = ms->dma_cmds; dtot = 0; if (cmd) { int nseg; cmd->SCp.this_residual = scsi_bufflen(cmd); nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (nseg) { total = 0; off = ms->data_ptr; scsi_for_each_sg(cmd, scl, nseg, i) { u32 dma_addr = sg_dma_address(scl); u32 dma_len = sg_dma_len(scl); total += scl->length; if (off >= dma_len) { off -= dma_len; continue; } if (dma_len > 0xffff) panic("mesh: scatterlist element >= 64k"); st_le16(&dcmds->req_count, dma_len - off); st_le16(&dcmds->command, dma_cmd); st_le32(&dcmds->phy_addr, dma_addr + off); dcmds->xfer_status = 0; ++dcmds; dtot += dma_len - off; off = 0; } } } if (dtot == 0) { /* Either the target has overrun our buffer, or the caller didn't provide a buffer. 
*/ static char mesh_extra_buf[64]; dtot = sizeof(mesh_extra_buf); st_le16(&dcmds->req_count, dtot); st_le32(&dcmds->phy_addr, virt_to_phys(mesh_extra_buf)); dcmds->xfer_status = 0; ++dcmds; } dma_cmd += OUTPUT_LAST - OUTPUT_MORE; st_le16(&dcmds[-1].command, dma_cmd); memset(dcmds, 0, sizeof(*dcmds)); st_le16(&dcmds->command, DBDMA_STOP); ms->dma_count = dtot; } static void halt_dma(struct mesh_state *ms) { volatile struct dbdma_regs __iomem *md = ms->dma; volatile struct mesh_regs __iomem *mr = ms->mesh; struct scsi_cmnd *cmd = ms->current_req; int t, nb; if (!ms->tgts[ms->conn_tgt].data_goes_out) { /* wait a little while until the fifo drains */ t = 50; while (t > 0 && in_8(&mr->fifo_count) != 0 && (in_le32(&md->status) & ACTIVE) != 0) { --t; udelay(1); } } out_le32(&md->control, RUN << 16); /* turn off RUN bit */ nb = (mr->count_hi << 8) + mr->count_lo; dlog(ms, "halt_dma fc/count=%.6x", MKWORD(0, mr->fifo_count, 0, nb)); if (ms->tgts[ms->conn_tgt].data_goes_out) nb += mr->fifo_count; /* nb is the number of bytes not yet transferred to/from the target. 
*/ ms->data_ptr -= nb; dlog(ms, "data_ptr %x", ms->data_ptr); if (ms->data_ptr < 0) { printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n", ms->data_ptr, nb, ms); ms->data_ptr = 0; #ifdef MESH_DBG dumplog(ms, ms->conn_tgt); dumpslog(ms); #endif /* MESH_DBG */ } else if (cmd && scsi_bufflen(cmd) && ms->data_ptr > scsi_bufflen(cmd)) { printk(KERN_DEBUG "mesh: target %d overrun, " "data_ptr=%x total=%x goes_out=%d\n", ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd), ms->tgts[ms->conn_tgt].data_goes_out); } scsi_dma_unmap(cmd); ms->dma_started = 0; } static void phase_mismatch(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; int phase; dlog(ms, "phasemm ch/cl/seq/fc=%.8x", MKWORD(mr->count_hi, mr->count_lo, mr->sequence, mr->fifo_count)); phase = in_8(&mr->bus_status0) & BS0_PHASE; if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) { /* output the last byte of the message, without ATN */ out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg); mesh_flush_io(mr); udelay(1); out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]); ms->msgphase = msg_out_last; return; } if (ms->msgphase == msg_in) { get_msgin(ms); if (ms->n_msgin) handle_msgin(ms); } if (ms->dma_started) halt_dma(ms); if (mr->fifo_count) { out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); } ms->msgphase = msg_none; switch (phase) { case BP_DATAIN: ms->tgts[ms->conn_tgt].data_goes_out = 0; ms->phase = dataing; break; case BP_DATAOUT: ms->tgts[ms->conn_tgt].data_goes_out = 1; ms->phase = dataing; break; case BP_COMMAND: ms->phase = commanding; break; case BP_STATUS: ms->phase = statusing; break; case BP_MSGIN: ms->msgphase = msg_in; ms->n_msgin = 0; break; case BP_MSGOUT: ms->msgphase = msg_out; if (ms->n_msgout == 0) { if (ms->aborting) { do_abort(ms); } else { if (ms->last_n_msgout == 0) { printk(KERN_DEBUG "mesh: no msg to repeat\n"); ms->msgout[0] = NOP; ms->last_n_msgout = 1; } ms->n_msgout = ms->last_n_msgout; } } break; default: 
printk(KERN_DEBUG "mesh: unknown scsi phase %x\n", phase); ms->stat = DID_ERROR; mesh_done(ms, 1); return; } start_phase(ms); } static void cmd_complete(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; struct scsi_cmnd *cmd = ms->current_req; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; int seq, n, t; dlog(ms, "cmd_complete fc=%x", mr->fifo_count); seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0); switch (ms->msgphase) { case msg_out_xxx: /* huh? we expected a phase mismatch */ ms->n_msgin = 0; ms->msgphase = msg_in; /* fall through */ case msg_in: /* should have some message bytes in fifo */ get_msgin(ms); n = msgin_length(ms); if (ms->n_msgin < n) { out_8(&mr->count_lo, n - ms->n_msgin); out_8(&mr->sequence, SEQ_MSGIN + seq); } else { ms->msgphase = msg_none; handle_msgin(ms); start_phase(ms); } break; case msg_in_bad: out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGIN + SEQ_ATN + use_active_neg); break; case msg_out: /* * To get the right timing on ATN wrt ACK, we have * to get the MESH to drop ACK, wait until REQ gets * asserted, then drop ATN. To do this we first * issue a SEQ_MSGOUT with ATN and wait for REQ, * then change the command to a SEQ_MSGOUT w/o ATN. * If we don't see REQ in a reasonable time, we * change the command to SEQ_MSGIN with ATN, * wait for the phase mismatch interrupt, then * issue the SEQ_MSGOUT without ATN. 
*/ out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg + SEQ_ATN); t = 30; /* wait up to 30us */ while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0) udelay(1); dlog(ms, "last_mbyte err/exc/fc/cl=%.8x", MKWORD(mr->error, mr->exception, mr->fifo_count, mr->count_lo)); if (in_8(&mr->interrupt) & (INT_ERROR | INT_EXCEPTION)) { /* whoops, target didn't do what we expected */ ms->last_n_msgout = ms->n_msgout; ms->n_msgout = 0; if (in_8(&mr->interrupt) & INT_ERROR) { printk(KERN_ERR "mesh: error %x in msg_out\n", in_8(&mr->error)); handle_error(ms); return; } if (in_8(&mr->exception) != EXC_PHASEMM) printk(KERN_ERR "mesh: exc %x in msg_out\n", in_8(&mr->exception)); else printk(KERN_DEBUG "mesh: bs0=%x in msg_out\n", in_8(&mr->bus_status0)); handle_exception(ms); return; } if (in_8(&mr->bus_status0) & BS0_REQ) { out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg); mesh_flush_io(mr); udelay(1); out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]); ms->msgphase = msg_out_last; } else { out_8(&mr->sequence, SEQ_MSGIN + use_active_neg + SEQ_ATN); ms->msgphase = msg_out_xxx; } break; case msg_out_last: ms->last_n_msgout = ms->n_msgout; ms->n_msgout = 0; ms->msgphase = ms->expect_reply? msg_in: msg_none; start_phase(ms); break; case msg_none: switch (ms->phase) { case idle: printk(KERN_ERR "mesh: interrupt in idle phase?\n"); dumpslog(ms); return; case selecting: dlog(ms, "Selecting phase at command completion",0); ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt), (cmd? cmd->device->lun: 0)); ms->n_msgout = 1; ms->expect_reply = 0; if (ms->aborting) { ms->msgout[0] = ABORT; ms->n_msgout++; } else if (tp->sdtr_state == do_sdtr) { /* add SDTR message */ add_sdtr_msg(ms); ms->expect_reply = 1; tp->sdtr_state = sdtr_sent; } ms->msgphase = msg_out; /* * We need to wait for REQ before dropping ATN. 
* We wait for at most 30us, then fall back to * a scheme where we issue a SEQ_COMMAND with ATN, * which will give us a phase mismatch interrupt * when REQ does come, and then we send the message. */ t = 230; /* wait up to 230us */ while ((in_8(&mr->bus_status0) & BS0_REQ) == 0) { if (--t < 0) { dlog(ms, "impatient for req", ms->n_msgout); ms->msgphase = msg_none; break; } udelay(1); } break; case dataing: if (ms->dma_count != 0) { start_phase(ms); return; } /* * We can get a phase mismatch here if the target * changes to the status phase, even though we have * had a command complete interrupt. Then, if we * issue the SEQ_STATUS command, we'll get a sequence * error interrupt. Which isn't so bad except that * occasionally the mesh actually executes the * SEQ_STATUS *as well as* giving us the sequence * error and phase mismatch exception. */ out_8(&mr->sequence, 0); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); halt_dma(ms); break; case statusing: if (cmd) { cmd->SCp.Status = mr->fifo; if (DEBUG_TARGET(cmd)) printk(KERN_DEBUG "mesh: status is %x\n", cmd->SCp.Status); } ms->msgphase = msg_in; break; case busfreeing: mesh_done(ms, 1); return; case disconnecting: ms->current_req = NULL; ms->phase = idle; mesh_start(ms); return; default: break; } ++ms->phase; start_phase(ms); break; } } /* * Called by midlayer with host locked to queue a new * request */ static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct mesh_state *ms; cmd->scsi_done = done; cmd->host_scribble = NULL; ms = (struct mesh_state *) cmd->device->host->hostdata; if (ms->request_q == NULL) ms->request_q = cmd; else ms->request_qtail->host_scribble = (void *) cmd; ms->request_qtail = cmd; if (ms->phase == idle) mesh_start(ms); return 0; } static DEF_SCSI_QCMD(mesh_queue) /* * Called to handle interrupts, either call by the interrupt * handler (do_mesh_interrupt) or by other functions in * exceptional circumstances */ static void mesh_interrupt(struct 
mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; int intr; #if 0 if (ALLOW_DEBUG(ms->conn_tgt)) printk(KERN_DEBUG "mesh_intr, bs0=%x int=%x exc=%x err=%x " "phase=%d msgphase=%d\n", mr->bus_status0, mr->interrupt, mr->exception, mr->error, ms->phase, ms->msgphase); #endif while ((intr = in_8(&mr->interrupt)) != 0) { dlog(ms, "interrupt intr/err/exc/seq=%.8x", MKWORD(intr, mr->error, mr->exception, mr->sequence)); if (intr & INT_ERROR) { handle_error(ms); } else if (intr & INT_EXCEPTION) { handle_exception(ms); } else if (intr & INT_CMDDONE) { out_8(&mr->interrupt, INT_CMDDONE); cmd_complete(ms); } } } /* Todo: here we can at least try to remove the command from the * queue if it isn't connected yet, and for pending command, assert * ATN until the bus gets freed. */ static int mesh_abort(struct scsi_cmnd *cmd) { struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata; printk(KERN_DEBUG "mesh_abort(%p)\n", cmd); mesh_dump_regs(ms); dumplog(ms, cmd->device->id); dumpslog(ms); return FAILED; } /* * Called by the midlayer with the lock held to reset the * SCSI host and bus. 
* The midlayer will wait for devices to come back, we don't need * to do that ourselves */ static int mesh_host_reset(struct scsi_cmnd *cmd) { struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata; volatile struct mesh_regs __iomem *mr = ms->mesh; volatile struct dbdma_regs __iomem *md = ms->dma; unsigned long flags; printk(KERN_DEBUG "mesh_host_reset\n"); spin_lock_irqsave(ms->host->host_lock, flags); /* Reset the controller & dbdma channel */ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ out_8(&mr->exception, 0xff); /* clear all exception bits */ out_8(&mr->error, 0xff); /* clear all error bits */ out_8(&mr->sequence, SEQ_RESETMESH); mesh_flush_io(mr); udelay(1); out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->source_id, ms->host->this_id); out_8(&mr->sel_timeout, 25); /* 250ms */ out_8(&mr->sync_params, ASYNC_PARAMS); /* Reset the bus */ out_8(&mr->bus_status1, BS1_RST); /* assert RST */ mesh_flush_io(mr); udelay(30); /* leave it on for >= 25us */ out_8(&mr->bus_status1, 0); /* negate RST */ /* Complete pending commands */ handle_reset(ms); spin_unlock_irqrestore(ms->host->host_lock, flags); return SUCCESS; } static void set_mesh_power(struct mesh_state *ms, int state) { if (!machine_is(powermac)) return; if (state) { pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1); msleep(200); } else { pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0); msleep(10); } } #ifdef CONFIG_PM static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg) { struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); unsigned long flags; switch (mesg.event) { case PM_EVENT_SUSPEND: case PM_EVENT_HIBERNATE: case PM_EVENT_FREEZE: break; default: return 0; } if (ms->phase == sleeping) return 0; scsi_block_requests(ms->host); spin_lock_irqsave(ms->host->host_lock, flags); while(ms->phase != idle) { spin_unlock_irqrestore(ms->host->host_lock, flags); 
msleep(10); spin_lock_irqsave(ms->host->host_lock, flags); } ms->phase = sleeping; spin_unlock_irqrestore(ms->host->host_lock, flags); disable_irq(ms->meshintr); set_mesh_power(ms, 0); return 0; } static int mesh_resume(struct macio_dev *mdev) { struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); unsigned long flags; if (ms->phase != sleeping) return 0; set_mesh_power(ms, 1); mesh_init(ms); spin_lock_irqsave(ms->host->host_lock, flags); mesh_start(ms); spin_unlock_irqrestore(ms->host->host_lock, flags); enable_irq(ms->meshintr); scsi_unblock_requests(ms->host); return 0; } #endif /* CONFIG_PM */ /* * If we leave drives set for synchronous transfers (especially * CDROMs), and reboot to MacOS, it gets confused, poor thing. * So, on reboot we reset the SCSI bus. */ static int mesh_shutdown(struct macio_dev *mdev) { struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); volatile struct mesh_regs __iomem *mr; unsigned long flags; printk(KERN_INFO "resetting MESH scsi bus(es)\n"); spin_lock_irqsave(ms->host->host_lock, flags); mr = ms->mesh; out_8(&mr->intr_mask, 0); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->bus_status1, BS1_RST); mesh_flush_io(mr); udelay(30); out_8(&mr->bus_status1, 0); spin_unlock_irqrestore(ms->host->host_lock, flags); return 0; } static struct scsi_host_template mesh_template = { .proc_name = "mesh", .name = "MESH", .queuecommand = mesh_queue, .eh_abort_handler = mesh_abort, .eh_host_reset_handler = mesh_host_reset, .can_queue = 20, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, }; static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) { struct device_node *mesh = macio_get_of_node(mdev); struct pci_dev* pdev = macio_get_pci_dev(mdev); int tgt, minper; const int *cfp; struct mesh_state *ms; struct Scsi_Host *mesh_host; void *dma_cmd_space; dma_addr_t dma_cmd_bus; switch (mdev->bus->chip->type) { case 
macio_heathrow: case macio_gatwick: case macio_paddington: use_active_neg = 0; break; default: use_active_neg = SEQ_ACTIVE_NEG; } if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) { printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs" " (got %d,%d)\n", macio_resource_count(mdev), macio_irq_count(mdev)); return -ENODEV; } if (macio_request_resources(mdev, "mesh") != 0) { printk(KERN_ERR "mesh: unable to request memory resources"); return -EBUSY; } mesh_host = scsi_host_alloc(&mesh_template, sizeof(struct mesh_state)); if (mesh_host == NULL) { printk(KERN_ERR "mesh: couldn't register host"); goto out_release; } /* Old junk for root discovery, that will die ultimately */ #if !defined(MODULE) note_scsi_host(mesh, mesh_host); #endif mesh_host->base = macio_resource_start(mdev, 0); mesh_host->irq = macio_irq(mdev, 0); ms = (struct mesh_state *) mesh_host->hostdata; macio_set_drvdata(mdev, ms); ms->host = mesh_host; ms->mdev = mdev; ms->pdev = pdev; ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000); if (ms->mesh == NULL) { printk(KERN_ERR "mesh: can't map registers\n"); goto out_free; } ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000); if (ms->dma == NULL) { printk(KERN_ERR "mesh: can't map registers\n"); iounmap(ms->mesh); goto out_free; } ms->meshintr = macio_irq(mdev, 0); ms->dmaintr = macio_irq(mdev, 1); /* Space for dma command list: +1 for stop command, * +1 to allow for aligning. 
*/ ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd); /* We use the PCI APIs for now until the generic one gets fixed * enough or until we get some macio-specific versions */ dma_cmd_space = pci_alloc_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, &dma_cmd_bus); if (dma_cmd_space == NULL) { printk(KERN_ERR "mesh: can't allocate DMA table\n"); goto out_unmap; } memset(dma_cmd_space, 0, ms->dma_cmd_size); ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space); ms->dma_cmd_space = dma_cmd_space; ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds) - (unsigned long)dma_cmd_space; ms->current_req = NULL; for (tgt = 0; tgt < 8; ++tgt) { ms->tgts[tgt].sdtr_state = do_sdtr; ms->tgts[tgt].sync_params = ASYNC_PARAMS; ms->tgts[tgt].current_req = NULL; } if ((cfp = of_get_property(mesh, "clock-frequency", NULL))) ms->clk_freq = *cfp; else { printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n"); ms->clk_freq = 50000000; } /* The maximum sync rate is clock / 5; increase * mesh_sync_period if necessary. 
*/ minper = 1000000000 / (ms->clk_freq / 5); /* ns */ if (mesh_sync_period < minper) mesh_sync_period = minper; /* Power up the chip */ set_mesh_power(ms, 1); /* Set it up */ mesh_init(ms); /* Request interrupt */ if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) { printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr); goto out_shutdown; } /* Add scsi host & scan */ if (scsi_add_host(mesh_host, &mdev->ofdev.dev)) goto out_release_irq; scsi_scan_host(mesh_host); return 0; out_release_irq: free_irq(ms->meshintr, ms); out_shutdown: /* shutdown & reset bus in case of error or macos can be confused * at reboot if the bus was set to synchronous mode already */ mesh_shutdown(mdev); set_mesh_power(ms, 0); pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, ms->dma_cmd_space, ms->dma_cmd_bus); out_unmap: iounmap(ms->dma); iounmap(ms->mesh); out_free: scsi_host_put(mesh_host); out_release: macio_release_resources(mdev); return -ENODEV; } static int mesh_remove(struct macio_dev *mdev) { struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); struct Scsi_Host *mesh_host = ms->host; scsi_remove_host(mesh_host); free_irq(ms->meshintr, ms); /* Reset scsi bus */ mesh_shutdown(mdev); /* Shut down chip & termination */ set_mesh_power(ms, 0); /* Unmap registers & dma controller */ iounmap(ms->mesh); iounmap(ms->dma); /* Free DMA commands memory */ pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, ms->dma_cmd_space, ms->dma_cmd_bus); /* Release memory resources */ macio_release_resources(mdev); scsi_host_put(mesh_host); return 0; } static struct of_device_id mesh_match[] = { { .name = "mesh", }, { .type = "scsi", .compatible = "chrp,mesh0" }, {}, }; MODULE_DEVICE_TABLE (of, mesh_match); static struct macio_driver mesh_driver = { .driver = { .name = "mesh", .owner = THIS_MODULE, .of_match_table = mesh_match, }, .probe = mesh_probe, .remove = mesh_remove, .shutdown = mesh_shutdown, #ifdef CONFIG_PM .suspend = mesh_suspend, .resume 
= mesh_resume, #endif }; static int __init init_mesh(void) { /* Calculate sync rate from module parameters */ if (sync_rate > 10) sync_rate = 10; if (sync_rate > 0) { printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", sync_rate); mesh_sync_period = 1000 / sync_rate; /* ns */ mesh_sync_offset = 15; } else printk(KERN_INFO "mesh: configured for asynchronous\n"); return macio_register_driver(&mesh_driver); } static void __exit exit_mesh(void) { return macio_unregister_driver(&mesh_driver); } module_init(init_mesh); module_exit(exit_mesh);
gpl-2.0
spacecaker/android_kernel_acer_swing-CAF
drivers/scsi/libsrp.c
8067
10780
/* * SCSI RDMA Protocol lib functions * * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/err.h> #include <linux/slab.h> #include <linux/kfifo.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_tgt.h> #include <scsi/srp.h> #include <scsi/libsrp.h> enum srp_task_attributes { SRP_SIMPLE_TASK = 0, SRP_HEAD_TASK = 1, SRP_ORDERED_TASK = 2, SRP_ACA_TASK = 4 }; /* tmp - will replace with SCSI logging stuff */ #define eprintk(fmt, args...) \ do { \ printk("%s(%d) " fmt, __func__, __LINE__, ##args); \ } while (0) /* #define dprintk eprintk */ #define dprintk(fmt, args...) 
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max, struct srp_buf **ring) { int i; struct iu_entry *iue; q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL); if (!q->pool) return -ENOMEM; q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL); if (!q->items) goto free_pool; spin_lock_init(&q->lock); kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *)); for (i = 0, iue = q->items; i < max; i++) { kfifo_in(&q->queue, (void *) &iue, sizeof(void *)); iue->sbuf = ring[i]; iue++; } return 0; kfree(q->items); free_pool: kfree(q->pool); return -ENOMEM; } static void srp_iu_pool_free(struct srp_queue *q) { kfree(q->items); kfree(q->pool); } static struct srp_buf **srp_ring_alloc(struct device *dev, size_t max, size_t size) { int i; struct srp_buf **ring; ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL); if (!ring) return NULL; for (i = 0; i < max; i++) { ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL); if (!ring[i]) goto out; ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma, GFP_KERNEL); if (!ring[i]->buf) goto out; } return ring; out: for (i = 0; i < max && ring[i]; i++) { if (ring[i]->buf) dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); kfree(ring[i]); } kfree(ring); return NULL; } static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max, size_t size) { int i; for (i = 0; i < max; i++) { dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); kfree(ring[i]); } kfree(ring); } int srp_target_alloc(struct srp_target *target, struct device *dev, size_t nr, size_t iu_size) { int err; spin_lock_init(&target->lock); INIT_LIST_HEAD(&target->cmd_queue); target->dev = dev; dev_set_drvdata(target->dev, target); target->srp_iu_size = iu_size; target->rx_ring_size = nr; target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size); if (!target->rx_ring) return -ENOMEM; err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring); if (err) goto free_ring; return 0; free_ring: 
srp_ring_free(target->dev, target->rx_ring, nr, iu_size); return -ENOMEM; } EXPORT_SYMBOL_GPL(srp_target_alloc); void srp_target_free(struct srp_target *target) { srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size, target->srp_iu_size); srp_iu_pool_free(&target->iu_queue); } EXPORT_SYMBOL_GPL(srp_target_free); struct iu_entry *srp_iu_get(struct srp_target *target) { struct iu_entry *iue = NULL; if (kfifo_out_locked(&target->iu_queue.queue, (void *) &iue, sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) { WARN_ONCE(1, "unexpected fifo state"); return NULL; } if (!iue) return iue; iue->target = target; INIT_LIST_HEAD(&iue->ilist); iue->flags = 0; return iue; } EXPORT_SYMBOL_GPL(srp_iu_get); void srp_iu_put(struct iu_entry *iue) { kfifo_in_locked(&iue->target->iu_queue.queue, (void *) &iue, sizeof(void *), &iue->target->iu_queue.lock); } EXPORT_SYMBOL_GPL(srp_iu_put); static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md, enum dma_data_direction dir, srp_rdma_t rdma_io, int dma_map, int ext_desc) { struct iu_entry *iue = NULL; struct scatterlist *sg = NULL; int err, nsg = 0, len; if (dma_map) { iue = (struct iu_entry *) sc->SCp.ptr; sg = scsi_sglist(sc); dprintk("%p %u %u %d\n", iue, scsi_bufflen(sc), md->len, scsi_sg_count(sc)); nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc), DMA_BIDIRECTIONAL); if (!nsg) { printk("fail to map %p %d\n", iue, scsi_sg_count(sc)); return 0; } len = min(scsi_bufflen(sc), md->len); } else len = md->len; err = rdma_io(sc, sg, nsg, md, 1, dir, len); if (dma_map) dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); return err; } static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd, struct srp_indirect_buf *id, enum dma_data_direction dir, srp_rdma_t rdma_io, int dma_map, int ext_desc) { struct iu_entry *iue = NULL; struct srp_direct_buf *md = NULL; struct scatterlist dummy, *sg = NULL; dma_addr_t token = 0; int err = 0; int nmd, nsg = 0, len; if (dma_map || 
ext_desc) { iue = (struct iu_entry *) sc->SCp.ptr; sg = scsi_sglist(sc); dprintk("%p %u %u %d %d\n", iue, scsi_bufflen(sc), id->len, cmd->data_in_desc_cnt, cmd->data_out_desc_cnt); } nmd = id->table_desc.len / sizeof(struct srp_direct_buf); if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) || (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) { md = &id->desc_list[0]; goto rdma; } if (ext_desc && dma_map) { md = dma_alloc_coherent(iue->target->dev, id->table_desc.len, &token, GFP_KERNEL); if (!md) { eprintk("Can't get dma memory %u\n", id->table_desc.len); return -ENOMEM; } sg_init_one(&dummy, md, id->table_desc.len); sg_dma_address(&dummy) = token; sg_dma_len(&dummy) = id->table_desc.len; err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE, id->table_desc.len); if (err) { eprintk("Error copying indirect table %d\n", err); goto free_mem; } } else { eprintk("This command uses external indirect buffer\n"); return -EINVAL; } rdma: if (dma_map) { nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc), DMA_BIDIRECTIONAL); if (!nsg) { eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc)); err = -EIO; goto free_mem; } len = min(scsi_bufflen(sc), id->len); } else len = id->len; err = rdma_io(sc, sg, nsg, md, nmd, dir, len); if (dma_map) dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); free_mem: if (token && dma_map) dma_free_coherent(iue->target->dev, id->table_desc.len, md, token); return err; } static int data_out_desc_size(struct srp_cmd *cmd) { int size = 0; u8 fmt = cmd->buf_fmt >> 4; switch (fmt) { case SRP_NO_DATA_DESC: break; case SRP_DATA_DESC_DIRECT: size = sizeof(struct srp_direct_buf); break; case SRP_DATA_DESC_INDIRECT: size = sizeof(struct srp_indirect_buf) + sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt; break; default: eprintk("client error. Invalid data_out_format %x\n", fmt); break; } return size; } /* * TODO: this can be called multiple times for a single command if it * has very long data. 
 */
/*
 * Dispatch the data phase of an SRP command: pick the direct or
 * indirect descriptor (located at add_cdb_len-aligned offset into
 * add_data, skipping the data-out area for data-in commands) and hand
 * it to srp_direct_data()/srp_indirect_data().  Returns 0, the
 * rdma_io() result, or -EINVAL for an unknown buffer format.
 */
int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
		      srp_rdma_t rdma_io, int dma_map, int ext_desc)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	enum dma_data_direction dir;
	int offset, err = 0;
	u8 format;

	/* Descriptors start right after the (4-byte aligned) extra CDB. */
	offset = cmd->add_cdb_len & ~3;

	dir = srp_cmd_direction(cmd);
	if (dir == DMA_FROM_DEVICE)
		offset += data_out_desc_size(cmd);

	/* High nibble of buf_fmt = data-out format, low nibble = data-in. */
	if (dir == DMA_TO_DEVICE)
		format = cmd->buf_fmt >> 4;
	else
		format = cmd->buf_fmt & ((1U << 4) - 1);

	switch (format) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *)
			(cmd->add_data + offset);
		err = srp_direct_data(sc, md, dir, rdma_io, dma_map, ext_desc);
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *)
			(cmd->add_data + offset);
		err = srp_indirect_data(sc, cmd, id, dir, rdma_io, dma_map,
					ext_desc);
		break;
	default:
		eprintk("Unknown format %d %x\n", dir, format);
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL_GPL(srp_transfer_data);

/*
 * Total data length announced by @cmd's buffer descriptor for
 * direction @dir (len field of the direct or indirect descriptor);
 * 0 when there is no data or the format is invalid (logged).
 */
static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	int len = 0, offset = cmd->add_cdb_len & ~3;
	u8 fmt;

	if (dir == DMA_TO_DEVICE)
		fmt = cmd->buf_fmt >> 4;
	else {
		fmt = cmd->buf_fmt & ((1U << 4) - 1);
		offset += data_out_desc_size(cmd);
	}

	switch (fmt) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *) (cmd->add_data + offset);
		len = md->len;
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *) (cmd->add_data + offset);
		len = id->len;
		break;
	default:
		eprintk("invalid data format %x\n", fmt);
		break;
	}
	return len;
}

/*
 * Build a scsi_cmnd from SRP command @cmd and queue it to the target
 * layer.  The SRP task attribute is mapped to a SCSI message tag
 * (defaulting to ORDERED for unknown attributes); @info is stashed in
 * SCp.ptr for the data-phase callbacks above, and @addr is recorded in
 * sdb.table.sgl (opaque cookie for the transport's rdma_io — not a
 * real scatterlist pointer here; the transport interprets it).
 * Returns 0 or a negative errno; on queueing failure the command is
 * released back to the host.
 */
int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
		  u64 itn_id, u64 addr)
{
	enum dma_data_direction dir;
	struct scsi_cmnd *sc;
	int tag, len, err;

	switch (cmd->task_attr) {
	case SRP_SIMPLE_TASK:
		tag = MSG_SIMPLE_TAG;
		break;
	case SRP_ORDERED_TASK:
		tag = MSG_ORDERED_TAG;
		break;
	case SRP_HEAD_TASK:
		tag = MSG_HEAD_TAG;
		break;
	default:
		eprintk("Task attribute %d not supported\n", cmd->task_attr);
		tag = MSG_ORDERED_TAG;
	}

	dir = srp_cmd_direction(cmd);
	len = vscsis_data_length(cmd, dir);

	dprintk("%p %x %lx %d %d %d %llx\n", info, cmd->cdb[0],
		cmd->lun, dir, len, tag, (unsigned long long) cmd->tag);

	sc = scsi_host_get_command(shost, dir, GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	sc->SCp.ptr = info;
	memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
	sc->sdb.length = len;
	sc->sdb.table.sgl = (void *) (unsigned long) addr;
	sc->tag = tag;
	err = scsi_tgt_queue_command(sc, itn_id, (struct scsi_lun *)&cmd->lun,
				     cmd->tag);
	if (err)
		scsi_host_put_command(shost, sc);

	return err;
}
EXPORT_SYMBOL_GPL(srp_cmd_queue);

MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");
gpl-2.0