repo_name
string
path
string
copies
string
size
string
content
string
license
string
julianschweizer/kernel_23.0.1.A.0.xxx
arch/mips/netlogic/common/time.c
4692
2019
/*
 * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
 * reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the NetLogic
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/init.h>

#include <asm/time.h>
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/common.h>

/*
 * Report which CP0 interrupt line carries the count/compare timer
 * interrupt on NetLogic SoCs.  The generic MIPS timer code queries
 * this when wiring up the per-CPU clockevent.
 */
unsigned int __cpuinit get_c0_compare_int(void)
{
	return IRQ_TIMER;
}

/*
 * Platform timer initialization: set the MIPS high-precision timer
 * frequency from the detected CPU core clock so the CP0 count/compare
 * pair can be used as the system clocksource/clockevent.
 */
void __init plat_time_init(void)
{
	mips_hpt_frequency = nlm_get_cpu_frequency();
	/*
	 * Fix: the argument is explicitly cast to unsigned long, so the
	 * conversion specifier must be %lu; the original %ld was a
	 * signed/unsigned mismatch (undefined behavior per C11
	 * 7.21.6.1, and a -Wformat warning).
	 */
	pr_info("MIPS counter frequency [%lu]\n",
		(unsigned long)mips_hpt_frequency);
}
gpl-2.0
Chairshot215/starship_kernel_moto_shamu
net/irda/irlan/irlan_eth.c
7508
9647
/********************************************************************* * * Filename: irlan_eth.c * Version: * Description: * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Thu Oct 15 08:37:58 1998 * Modified at: Tue Mar 21 09:06:41 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov> * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk> * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> * * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/inetdevice.h> #include <linux/if_arp.h> #include <linux/module.h> #include <linux/sched.h> #include <net/arp.h> #include <net/irda/irda.h> #include <net/irda/irmod.h> #include <net/irda/irlan_common.h> #include <net/irda/irlan_client.h> #include <net/irda/irlan_event.h> #include <net/irda/irlan_eth.h> static int irlan_eth_open(struct net_device *dev); static int irlan_eth_close(struct net_device *dev); static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev); static void irlan_eth_set_multicast_list( struct net_device *dev); static const struct net_device_ops irlan_eth_netdev_ops = { .ndo_open = irlan_eth_open, .ndo_stop = irlan_eth_close, .ndo_start_xmit = irlan_eth_xmit, .ndo_set_rx_mode = irlan_eth_set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; /* * Function irlan_eth_setup (dev) * * The network 
device initialization function. * */ static void irlan_eth_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &irlan_eth_netdev_ops; dev->destructor = free_netdev; /* * Lets do all queueing in IrTTP instead of this device driver. * Queueing here as well can introduce some strange latency * problems, which we will avoid by setting the queue size to 0. */ /* * The bugs in IrTTP and IrLAN that created this latency issue * have now been fixed, and we can propagate flow control properly * to the network layer. However, this requires a minimal queue of * packets for the device. * Without flow control, the Tx Queue is 14 (ttp) + 0 (dev) = 14 * With flow control, the Tx Queue is 7 (ttp) + 4 (dev) = 11 * See irlan_eth_flow_indication()... * Note : this number was randomly selected and would need to * be adjusted. * Jean II */ dev->tx_queue_len = 4; } /* * Function alloc_irlandev * * Allocate network device and control block * */ struct net_device *alloc_irlandev(const char *name) { return alloc_netdev(sizeof(struct irlan_cb), name, irlan_eth_setup); } /* * Function irlan_eth_open (dev) * * Network device has been opened by user * */ static int irlan_eth_open(struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); IRDA_DEBUG(2, "%s()\n", __func__ ); /* Ready to play! */ netif_stop_queue(dev); /* Wait until data link is ready */ /* We are now open, so time to do some work */ self->disconnect_reason = 0; irlan_client_wakeup(self, self->saddr, self->daddr); /* Make sure we have a hardware address before we return, so DHCP clients gets happy */ return wait_event_interruptible(self->open_wait, !self->tsap_data->connected); } /* * Function irlan_eth_close (dev) * * Stop the ether network device, his function will usually be called by * ifconfig down. We should now disconnect the link, We start the * close timer, so that the instance will be removed if we are unable * to discover the remote device after the disconnect. 
*/ static int irlan_eth_close(struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); IRDA_DEBUG(2, "%s()\n", __func__ ); /* Stop device */ netif_stop_queue(dev); irlan_close_data_channel(self); irlan_close_tsaps(self); irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL); irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL); /* Remove frames queued on the control channel */ skb_queue_purge(&self->client.txq); self->client.tx_busy = 0; return 0; } /* * Function irlan_eth_tx (skb) * * Transmits ethernet frames over IrDA link. * */ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); int ret; unsigned int len; /* skb headroom large enough to contain all IrDA-headers? */ if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, self->max_header_size); /* We have to free the original skb anyway */ dev_kfree_skb(skb); /* Did the realloc succeed? */ if (new_skb == NULL) return NETDEV_TX_OK; /* Use the new skb instead */ skb = new_skb; } dev->trans_start = jiffies; len = skb->len; /* Now queue the packet in the transport layer */ if (self->use_udata) ret = irttp_udata_request(self->tsap_data, skb); else ret = irttp_data_request(self->tsap_data, skb); if (ret < 0) { /* * IrTTPs tx queue is full, so we just have to * drop the frame! You might think that we should * just return -1 and don't deallocate the frame, * but that is dangerous since it's possible that * we have replaced the original skb with a new * one with larger headroom, and that would really * confuse do_dev_queue_xmit() in dev.c! 
I have * tried :-) DB */ /* irttp_data_request already free the packet */ dev->stats.tx_dropped++; } else { dev->stats.tx_packets++; dev->stats.tx_bytes += len; } return NETDEV_TX_OK; } /* * Function irlan_eth_receive (handle, skb) * * This function gets the data that is received on the data channel * */ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb) { struct irlan_cb *self = instance; struct net_device *dev = self->dev; if (skb == NULL) { dev->stats.rx_dropped++; return 0; } if (skb->len < ETH_HLEN) { IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n", __func__, skb->len); dev->stats.rx_dropped++; dev_kfree_skb(skb); return 0; } /* * Adopt this frame! Important to set all these fields since they * might have been previously set by the low level IrDA network * device driver */ skb->protocol = eth_type_trans(skb, dev); /* Remove eth header */ dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; netif_rx(skb); /* Eat it! */ return 0; } /* * Function irlan_eth_flow (status) * * Do flow control between IP/Ethernet and IrLAN/IrTTP. This is done by * controlling the queue stop/start. * * The IrDA link layer has the advantage to have flow control, and * IrTTP now properly handles that. Flow controlling the higher layers * prevent us to drop Tx packets in here (up to 15% for a TCP socket, * more for UDP socket). * Also, this allow us to reduce the overall transmit queue, which means * less latency in case of mixed traffic. * Jean II */ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) { struct irlan_cb *self; struct net_device *dev; self = instance; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); dev = self->dev; IRDA_ASSERT(dev != NULL, return;); IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __func__, flow == FLOW_STOP ? 
"FLOW_STOP" : "FLOW_START", netif_running(dev)); switch (flow) { case FLOW_STOP: /* IrTTP is full, stop higher layers */ netif_stop_queue(dev); break; case FLOW_START: default: /* Tell upper layers that its time to transmit frames again */ /* Schedule network layer */ netif_wake_queue(dev); break; } } /* * Function set_multicast_list (dev) * * Configure the filtering of the device * */ #define HW_MAX_ADDRS 4 /* Must query to get it! */ static void irlan_eth_set_multicast_list(struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); IRDA_DEBUG(2, "%s()\n", __func__ ); /* Check if data channel has been connected yet */ if (self->client.state != IRLAN_DATA) { IRDA_DEBUG(1, "%s(), delaying!\n", __func__ ); return; } if (dev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n"); } else if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > HW_MAX_ADDRS) { /* Disable promiscuous mode, use normal mode. */ IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); /* hardware_set_filter(NULL); */ irlan_set_multicast_filter(self, TRUE); } else if (!netdev_mc_empty(dev)) { IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ ); /* Walk the address list, and load the filter */ /* hardware_set_filter(dev->mc_list); */ irlan_set_multicast_filter(self, TRUE); } else { IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__ ); irlan_set_multicast_filter(self, FALSE); } if (dev->flags & IFF_BROADCAST) irlan_set_broadcast_filter(self, TRUE); else irlan_set_broadcast_filter(self, FALSE); }
gpl-2.0
redglasses/android_kernel_lge_g3-V20f
arch/s390/kernel/jump_label.c
8020
1372
/* * Jump label s390 support * * Copyright IBM Corp. 2011 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> */ #include <linux/module.h> #include <linux/uaccess.h> #include <linux/stop_machine.h> #include <linux/jump_label.h> #include <asm/ipl.h> #ifdef HAVE_JUMP_LABEL struct insn { u16 opcode; s32 offset; } __packed; struct insn_args { struct jump_entry *entry; enum jump_label_type type; }; static void __jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { struct insn insn; int rc; if (type == JUMP_LABEL_ENABLE) { /* brcl 15,offset */ insn.opcode = 0xc0f4; insn.offset = (entry->target - entry->code) >> 1; } else { /* brcl 0,0 */ insn.opcode = 0xc004; insn.offset = 0; } rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE); WARN_ON_ONCE(rc < 0); } static int __sm_arch_jump_label_transform(void *data) { struct insn_args *args = data; __jump_label_transform(args->entry, args->type); return 0; } void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { struct insn_args args; args.entry = entry; args.type = type; stop_machine(__sm_arch_jump_label_transform, &args, NULL); } void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { __jump_label_transform(entry, type); } #endif
gpl-2.0
cm-maya/android_kernel_hp_maya
arch/mips/pci/fixup-tb0219.c
9300
1438
/*
 * fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups.
 *
 * Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
 * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/pci.h>

#include <asm/vr41xx/tb0219.h>

/*
 * Map a PCI slot on the TB0219 board to its interrupt line.  Only
 * slots 12-14 are populated; any other slot has no IRQ and yields -1.
 * The pin argument is unused on this board.
 */
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	switch (slot) {
	case 12:
		return TB0219_PCI_SLOT1_IRQ;
	case 13:
		return TB0219_PCI_SLOT2_IRQ;
	case 14:
		return TB0219_PCI_SLOT3_IRQ;
	default:
		return -1;
	}
}

/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	/* Nothing board-specific is required here. */
	return 0;
}
gpl-2.0
falaze/nexus5n
drivers/media/video/usbvision/usbvision-cards.c
9812
33663
/* * usbvision-cards.c * usbvision cards definition file * * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de> * * This module is part of usbvision driver project. * Updates to driver completed by Dwaine P. Garden * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/list.h> #include <linux/module.h> #include <media/v4l2-dev.h> #include <media/tuner.h> #include "usbvision.h" #include "usbvision-cards.h" /* Supported Devices: A table for usbvision.c*/ struct usbvision_device_data_st usbvision_device_data[] = { [XANBOO] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 4, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .model_string = "Xanboo", }, [BELKIN_VIDEOBUS_II] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Belkin USB VideoBus II Adapter", }, [BELKIN_VIDEOBUS] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .model_string = "Belkin Components USB VideoBus", }, 
[BELKIN_USB_VIDEOBUS_II] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Belkin USB VideoBus II", }, [ECHOFX_INTERVIEW_LITE] = { .interface = 0, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "echoFX InterView Lite", }, [USBGEAR_USBG_V1] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .model_string = "USBGear USBG-V1 resp. HAMA USB", }, [D_LINK_V100] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 4, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "D-Link V100", }, [X10_USB_CAMERA] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .model_string = "X10 USB Camera", }, [HPG_WINTV_LIVE_PAL_BG] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Live (PAL B/G)", }, [HPG_WINTV_LIVE_PRO_NTSC_MN] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, 
.model_string = "Hauppauge WinTV USB Live Pro (NTSC M/N)", }, [ZORAN_PMD_NOGATECH] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 2, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Zoran Co. PMD (Nogatech) AV-grabber Manhattan", }, [NOGATECH_USB_TV_NTSC_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = 20, .model_string = "Nogatech USB-TV (NTSC) FM", }, [PNY_USB_TV_NTSC_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = 20, .model_string = "PNY USB-TV (NTSC) FM", }, [PV_PLAYTV_USB_PRO_PAL_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "PixelView PlayTv-USB PRO (PAL) FM", }, [ZT_721] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "ZTV ZT-721 2.4GHz USB A/V Receiver", }, [HPG_WINTV_NTSC_MN] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = 20, .model_string = "Hauppauge WinTV USB (NTSC M/N)", }, [HPG_WINTV_PAL_BG] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = 
V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL B/G)", }, [HPG_WINTV_PAL_I] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL I)", }, [HPG_WINTV_PAL_SECAM_L] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = 0x80, .y_offset = 0x16, .model_string = "Hauppauge WinTV USB (PAL/SECAM L)", }, [HPG_WINTV_PAL_D_K] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL D/K)", }, [HPG_WINTV_NTSC_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (NTSC FM)", }, [HPG_WINTV_PAL_BG_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL B/G FM)", }, [HPG_WINTV_PAL_I_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL I FM)", }, [HPG_WINTV_PAL_D_K_FM] = { .interface = -1, .codec = CODEC_SAA7111, 
.video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL D/K FM)", }, [HPG_WINTV_PRO_NTSC_MN] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_MICROTUNE_4049FM5, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N)", }, [HPG_WINTV_PRO_NTSC_MN_V2] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_MICROTUNE_4049FM5, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N) V2", }, [HPG_WINTV_PRO_PAL] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L)", }, [HPG_WINTV_PRO_NTSC_MN_V3] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N) V3", }, [HPG_WINTV_PRO_PAL_BG] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL B/G)", }, [HPG_WINTV_PRO_PAL_I] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 
3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL I)", }, [HPG_WINTV_PRO_PAL_SECAM_L] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM L)", }, [HPG_WINTV_PRO_PAL_D_K] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL D/K)", }, [HPG_WINTV_PRO_PAL_SECAM] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L)", }, [HPG_WINTV_PRO_PAL_SECAM_V2] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2", }, [HPG_WINTV_PRO_PAL_BG_V2] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_ALPS_TSBE1_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL B/G) V2", }, [HPG_WINTV_PRO_PAL_BG_D_K] = { .interface = -1, .codec 
= CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_ALPS_TSBE1_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL B/G,D/K)", }, [HPG_WINTV_PRO_PAL_I_D_K] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL I,D/K)", }, [HPG_WINTV_PRO_NTSC_MN_FM] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N FM)", }, [HPG_WINTV_PRO_PAL_BG_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL B/G FM)", }, [HPG_WINTV_PRO_PAL_I_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL I FM)", }, [HPG_WINTV_PRO_PAL_D_K_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL D/K FM)", }, [HPG_WINTV_PRO_TEMIC_PAL_FM] = { 
.interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_MICROTUNE_4049FM5, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM)", }, [HPG_WINTV_PRO_TEMIC_PAL_BG_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_MICROTUNE_4049FM5, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (Temic PAL B/G FM)", }, [HPG_WINTV_PRO_PAL_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM)", }, [HPG_WINTV_PRO_NTSC_MN_FM_V2] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N FM) V2", }, [CAMTEL_TVB330] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = 5, .y_offset = 5, .model_string = "Camtel Technology USB TV Genie Pro FM Model TVB330", }, [DIGITAL_VIDEO_CREATOR_I] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Digital Video Creator I", }, 
[GLOBAL_VILLAGE_GV_007_NTSC] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 82, .y_offset = 20, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Global Village GV-007 (NTSC)", }, [DAZZLE_DVC_50_REV_1_NTSC] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Dazzle Fusion Model DVC-50 Rev 1 (NTSC)", }, [DAZZLE_DVC_80_REV_1_PAL] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Dazzle Fusion Model DVC-80 Rev 1 (PAL)", }, [DAZZLE_DVC_90_REV_1_SECAM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_SECAM, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Dazzle Fusion Model DVC-90 Rev 1 (SECAM)", }, [ESKAPE_LABS_MYTV2GO] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Eskape Labs MyTV2Go", }, [PINNA_PCTV_USB_PAL] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 0, .tuner = 1, .tuner_type = TUNER_TEMIC_4066FY5_PAL_I, .x_offset = -1, .y_offset = -1, .model_string = "Pinnacle Studio PCTV USB (PAL)", }, [PINNA_PCTV_USB_SECAM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm 
= V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = -1, .y_offset = -1, .model_string = "Pinnacle Studio PCTV USB (SECAM)", }, [PINNA_PCTV_USB_PAL_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 128, .y_offset = 23, .model_string = "Pinnacle Studio PCTV USB (PAL) FM", }, [MIRO_PCTV_USB] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Miro PCTV USB", }, [PINNA_PCTV_USB_NTSC_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = -1, .model_string = "Pinnacle Studio PCTV USB (NTSC) FM", }, [PINNA_PCTV_USB_NTSC_FM_V3] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = -1, .model_string = "Pinnacle Studio PCTV USB (NTSC) FM V3", }, [PINNA_PCTV_USB_PAL_FM_V2] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_TEMIC_4009FR5_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle Studio PCTV USB (PAL) FM V2", }, [PINNA_PCTV_USB_NTSC_FM_V2] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_TEMIC_4039FR5_NTSC, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, 
.model_string = "Pinnacle Studio PCTV USB (NTSC) FM V2", }, [PINNA_PCTV_USB_PAL_FM_V3] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_TEMIC_4009FR5_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle Studio PCTV USB (PAL) FM V3", }, [PINNA_LINX_VD_IN_CAB_NTSC] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle Studio Linx Video input cable (NTSC)", }, [PINNA_LINX_VD_IN_CAB_PAL] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle Studio Linx Video input cable (PAL)", }, [PINNA_PCTV_BUNGEE_PAL_FM] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_TEMIC_4009FR5_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle PCTV Bungee USB (PAL) FM", }, [HPG_WINTV] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTv-USB", }, [MICROCAM_NTSC] = { .interface = -1, .codec = CODEC_WEBCAM, .video_channels = 1, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 0, .tuner = 0, .tuner_type = 0, .x_offset = 71, .y_offset = 15, .model_string = "Nogatech USB MicroCam NTSC (NV3000N)", }, [MICROCAM_PAL] = { .interface = -1, .codec = CODEC_WEBCAM, 
.video_channels = 1, .video_norm = V4L2_STD_PAL, .audio_channels = 0, .radio = 0, .vbi = 0, .tuner = 0, .tuner_type = 0, .x_offset = 71, .y_offset = 18, .model_string = "Nogatech USB MicroCam PAL (NV3001P)", }, }; const int usbvision_device_data_size = ARRAY_SIZE(usbvision_device_data); /* Supported Devices */ struct usb_device_id usbvision_table[] = { { USB_DEVICE(0x0a6f, 0x0400), .driver_info = XANBOO }, { USB_DEVICE(0x050d, 0x0106), .driver_info = BELKIN_VIDEOBUS_II }, { USB_DEVICE(0x050d, 0x0207), .driver_info = BELKIN_VIDEOBUS }, { USB_DEVICE(0x050d, 0x0208), .driver_info = BELKIN_USB_VIDEOBUS_II }, { USB_DEVICE(0x0571, 0x0002), .driver_info = ECHOFX_INTERVIEW_LITE }, { USB_DEVICE(0x0573, 0x0003), .driver_info = USBGEAR_USBG_V1 }, { USB_DEVICE(0x0573, 0x0400), .driver_info = D_LINK_V100 }, { USB_DEVICE(0x0573, 0x2000), .driver_info = X10_USB_CAMERA }, { USB_DEVICE(0x0573, 0x2d00), .driver_info = HPG_WINTV_LIVE_PAL_BG }, { USB_DEVICE(0x0573, 0x2d01), .driver_info = HPG_WINTV_LIVE_PRO_NTSC_MN }, { USB_DEVICE(0x0573, 0x2101), .driver_info = ZORAN_PMD_NOGATECH }, { USB_DEVICE(0x0573, 0x3000), .driver_info = MICROCAM_NTSC }, { USB_DEVICE(0x0573, 0x3001), .driver_info = MICROCAM_PAL }, { USB_DEVICE(0x0573, 0x4100), .driver_info = NOGATECH_USB_TV_NTSC_FM }, { USB_DEVICE(0x0573, 0x4110), .driver_info = PNY_USB_TV_NTSC_FM }, { USB_DEVICE(0x0573, 0x4450), .driver_info = PV_PLAYTV_USB_PRO_PAL_FM }, { USB_DEVICE(0x0573, 0x4550), .driver_info = ZT_721 }, { USB_DEVICE(0x0573, 0x4d00), .driver_info = HPG_WINTV_NTSC_MN }, { USB_DEVICE(0x0573, 0x4d01), .driver_info = HPG_WINTV_PAL_BG }, { USB_DEVICE(0x0573, 0x4d02), .driver_info = HPG_WINTV_PAL_I }, { USB_DEVICE(0x0573, 0x4d03), .driver_info = HPG_WINTV_PAL_SECAM_L }, { USB_DEVICE(0x0573, 0x4d04), .driver_info = HPG_WINTV_PAL_D_K }, { USB_DEVICE(0x0573, 0x4d10), .driver_info = HPG_WINTV_NTSC_FM }, { USB_DEVICE(0x0573, 0x4d11), .driver_info = HPG_WINTV_PAL_BG_FM }, { USB_DEVICE(0x0573, 0x4d12), .driver_info = HPG_WINTV_PAL_I_FM 
}, { USB_DEVICE(0x0573, 0x4d14), .driver_info = HPG_WINTV_PAL_D_K_FM }, { USB_DEVICE(0x0573, 0x4d2a), .driver_info = HPG_WINTV_PRO_NTSC_MN }, { USB_DEVICE(0x0573, 0x4d2b), .driver_info = HPG_WINTV_PRO_NTSC_MN_V2 }, { USB_DEVICE(0x0573, 0x4d2c), .driver_info = HPG_WINTV_PRO_PAL }, { USB_DEVICE(0x0573, 0x4d20), .driver_info = HPG_WINTV_PRO_NTSC_MN_V3 }, { USB_DEVICE(0x0573, 0x4d21), .driver_info = HPG_WINTV_PRO_PAL_BG }, { USB_DEVICE(0x0573, 0x4d22), .driver_info = HPG_WINTV_PRO_PAL_I }, { USB_DEVICE(0x0573, 0x4d23), .driver_info = HPG_WINTV_PRO_PAL_SECAM_L }, { USB_DEVICE(0x0573, 0x4d24), .driver_info = HPG_WINTV_PRO_PAL_D_K }, { USB_DEVICE(0x0573, 0x4d25), .driver_info = HPG_WINTV_PRO_PAL_SECAM }, { USB_DEVICE(0x0573, 0x4d26), .driver_info = HPG_WINTV_PRO_PAL_SECAM_V2 }, { USB_DEVICE(0x0573, 0x4d27), .driver_info = HPG_WINTV_PRO_PAL_BG_V2 }, { USB_DEVICE(0x0573, 0x4d28), .driver_info = HPG_WINTV_PRO_PAL_BG_D_K }, { USB_DEVICE(0x0573, 0x4d29), .driver_info = HPG_WINTV_PRO_PAL_I_D_K }, { USB_DEVICE(0x0573, 0x4d30), .driver_info = HPG_WINTV_PRO_NTSC_MN_FM }, { USB_DEVICE(0x0573, 0x4d31), .driver_info = HPG_WINTV_PRO_PAL_BG_FM }, { USB_DEVICE(0x0573, 0x4d32), .driver_info = HPG_WINTV_PRO_PAL_I_FM }, { USB_DEVICE(0x0573, 0x4d34), .driver_info = HPG_WINTV_PRO_PAL_D_K_FM }, { USB_DEVICE(0x0573, 0x4d35), .driver_info = HPG_WINTV_PRO_TEMIC_PAL_FM }, { USB_DEVICE(0x0573, 0x4d36), .driver_info = HPG_WINTV_PRO_TEMIC_PAL_BG_FM }, { USB_DEVICE(0x0573, 0x4d37), .driver_info = HPG_WINTV_PRO_PAL_FM }, { USB_DEVICE(0x0573, 0x4d38), .driver_info = HPG_WINTV_PRO_NTSC_MN_FM_V2 }, { USB_DEVICE(0x0768, 0x0006), .driver_info = CAMTEL_TVB330 }, { USB_DEVICE(0x07d0, 0x0001), .driver_info = DIGITAL_VIDEO_CREATOR_I }, { USB_DEVICE(0x07d0, 0x0002), .driver_info = GLOBAL_VILLAGE_GV_007_NTSC }, { USB_DEVICE(0x07d0, 0x0003), .driver_info = DAZZLE_DVC_50_REV_1_NTSC }, { USB_DEVICE(0x07d0, 0x0004), .driver_info = DAZZLE_DVC_80_REV_1_PAL }, { USB_DEVICE(0x07d0, 0x0005), .driver_info = 
DAZZLE_DVC_90_REV_1_SECAM }, { USB_DEVICE(0x07f8, 0x9104), .driver_info = ESKAPE_LABS_MYTV2GO }, { USB_DEVICE(0x2304, 0x010d), .driver_info = PINNA_PCTV_USB_PAL }, { USB_DEVICE(0x2304, 0x0109), .driver_info = PINNA_PCTV_USB_SECAM }, { USB_DEVICE(0x2304, 0x0110), .driver_info = PINNA_PCTV_USB_PAL_FM }, { USB_DEVICE(0x2304, 0x0111), .driver_info = MIRO_PCTV_USB }, { USB_DEVICE(0x2304, 0x0112), .driver_info = PINNA_PCTV_USB_NTSC_FM }, { USB_DEVICE(0x2304, 0x0113), .driver_info = PINNA_PCTV_USB_NTSC_FM_V3 }, { USB_DEVICE(0x2304, 0x0210), .driver_info = PINNA_PCTV_USB_PAL_FM_V2 }, { USB_DEVICE(0x2304, 0x0212), .driver_info = PINNA_PCTV_USB_NTSC_FM_V2 }, { USB_DEVICE(0x2304, 0x0214), .driver_info = PINNA_PCTV_USB_PAL_FM_V3 }, { USB_DEVICE(0x2304, 0x0300), .driver_info = PINNA_LINX_VD_IN_CAB_NTSC }, { USB_DEVICE(0x2304, 0x0301), .driver_info = PINNA_LINX_VD_IN_CAB_PAL }, { USB_DEVICE(0x2304, 0x0419), .driver_info = PINNA_PCTV_BUNGEE_PAL_FM }, { USB_DEVICE(0x2400, 0x4200), .driver_info = HPG_WINTV }, { }, /* terminate list */ }; MODULE_DEVICE_TABLE(usb, usbvision_table);
gpl-2.0
hallor/linux
drivers/media/usb/usbvision/usbvision-cards.c
9812
33663
/* * usbvision-cards.c * usbvision cards definition file * * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de> * * This module is part of usbvision driver project. * Updates to driver completed by Dwaine P. Garden * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/list.h> #include <linux/module.h> #include <media/v4l2-dev.h> #include <media/tuner.h> #include "usbvision.h" #include "usbvision-cards.h" /* Supported Devices: A table for usbvision.c*/ struct usbvision_device_data_st usbvision_device_data[] = { [XANBOO] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 4, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .model_string = "Xanboo", }, [BELKIN_VIDEOBUS_II] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Belkin USB VideoBus II Adapter", }, [BELKIN_VIDEOBUS] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .model_string = "Belkin Components USB VideoBus", }, 
[BELKIN_USB_VIDEOBUS_II] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Belkin USB VideoBus II", }, [ECHOFX_INTERVIEW_LITE] = { .interface = 0, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "echoFX InterView Lite", }, [USBGEAR_USBG_V1] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .model_string = "USBGear USBG-V1 resp. HAMA USB", }, [D_LINK_V100] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 4, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "D-Link V100", }, [X10_USB_CAMERA] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = -1, .model_string = "X10 USB Camera", }, [HPG_WINTV_LIVE_PAL_BG] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = -1, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Live (PAL B/G)", }, [HPG_WINTV_LIVE_PRO_NTSC_MN] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, 
.model_string = "Hauppauge WinTV USB Live Pro (NTSC M/N)", }, [ZORAN_PMD_NOGATECH] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 2, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Zoran Co. PMD (Nogatech) AV-grabber Manhattan", }, [NOGATECH_USB_TV_NTSC_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = 20, .model_string = "Nogatech USB-TV (NTSC) FM", }, [PNY_USB_TV_NTSC_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = 20, .model_string = "PNY USB-TV (NTSC) FM", }, [PV_PLAYTV_USB_PRO_PAL_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "PixelView PlayTv-USB PRO (PAL) FM", }, [ZT_721] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "ZTV ZT-721 2.4GHz USB A/V Receiver", }, [HPG_WINTV_NTSC_MN] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = 20, .model_string = "Hauppauge WinTV USB (NTSC M/N)", }, [HPG_WINTV_PAL_BG] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = 
V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL B/G)", }, [HPG_WINTV_PAL_I] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL I)", }, [HPG_WINTV_PAL_SECAM_L] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = 0x80, .y_offset = 0x16, .model_string = "Hauppauge WinTV USB (PAL/SECAM L)", }, [HPG_WINTV_PAL_D_K] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL D/K)", }, [HPG_WINTV_NTSC_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (NTSC FM)", }, [HPG_WINTV_PAL_BG_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL B/G FM)", }, [HPG_WINTV_PAL_I_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL I FM)", }, [HPG_WINTV_PAL_D_K_FM] = { .interface = -1, .codec = CODEC_SAA7111, 
.video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTV USB (PAL D/K FM)", }, [HPG_WINTV_PRO_NTSC_MN] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_MICROTUNE_4049FM5, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N)", }, [HPG_WINTV_PRO_NTSC_MN_V2] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_MICROTUNE_4049FM5, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N) V2", }, [HPG_WINTV_PRO_PAL] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L)", }, [HPG_WINTV_PRO_NTSC_MN_V3] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N) V3", }, [HPG_WINTV_PRO_PAL_BG] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL B/G)", }, [HPG_WINTV_PRO_PAL_I] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 
3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL I)", }, [HPG_WINTV_PRO_PAL_SECAM_L] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM L)", }, [HPG_WINTV_PRO_PAL_D_K] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL D/K)", }, [HPG_WINTV_PRO_PAL_SECAM] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L)", }, [HPG_WINTV_PRO_PAL_SECAM_V2] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2", }, [HPG_WINTV_PRO_PAL_BG_V2] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_ALPS_TSBE1_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL B/G) V2", }, [HPG_WINTV_PRO_PAL_BG_D_K] = { .interface = -1, .codec 
= CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_ALPS_TSBE1_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL B/G,D/K)", }, [HPG_WINTV_PRO_PAL_I_D_K] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL I,D/K)", }, [HPG_WINTV_PRO_NTSC_MN_FM] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N FM)", }, [HPG_WINTV_PRO_PAL_BG_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL B/G FM)", }, [HPG_WINTV_PRO_PAL_I_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL I FM)", }, [HPG_WINTV_PRO_PAL_D_K_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL D/K FM)", }, [HPG_WINTV_PRO_TEMIC_PAL_FM] = { 
.interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_MICROTUNE_4049FM5, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM)", }, [HPG_WINTV_PRO_TEMIC_PAL_BG_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_MICROTUNE_4049FM5, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (Temic PAL B/G FM)", }, [HPG_WINTV_PRO_PAL_FM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM)", }, [HPG_WINTV_PRO_NTSC_MN_FM_V2] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Hauppauge WinTV USB Pro (NTSC M/N FM) V2", }, [CAMTEL_TVB330] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = 5, .y_offset = 5, .model_string = "Camtel Technology USB TV Genie Pro FM Model TVB330", }, [DIGITAL_VIDEO_CREATOR_I] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Digital Video Creator I", }, 
[GLOBAL_VILLAGE_GV_007_NTSC] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 82, .y_offset = 20, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Global Village GV-007 (NTSC)", }, [DAZZLE_DVC_50_REV_1_NTSC] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Dazzle Fusion Model DVC-50 Rev 1 (NTSC)", }, [DAZZLE_DVC_80_REV_1_PAL] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Dazzle Fusion Model DVC-80 Rev 1 (PAL)", }, [DAZZLE_DVC_90_REV_1_SECAM] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_SECAM, .audio_channels = 0, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Dazzle Fusion Model DVC-90 Rev 1 (SECAM)", }, [ESKAPE_LABS_MYTV2GO] = { .interface = 0, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Eskape Labs MyTV2Go", }, [PINNA_PCTV_USB_PAL] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 0, .tuner = 1, .tuner_type = TUNER_TEMIC_4066FY5_PAL_I, .x_offset = -1, .y_offset = -1, .model_string = "Pinnacle Studio PCTV USB (PAL)", }, [PINNA_PCTV_USB_SECAM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm 
= V4L2_STD_SECAM, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_SECAM, .x_offset = -1, .y_offset = -1, .model_string = "Pinnacle Studio PCTV USB (SECAM)", }, [PINNA_PCTV_USB_PAL_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = 128, .y_offset = 23, .model_string = "Pinnacle Studio PCTV USB (PAL) FM", }, [MIRO_PCTV_USB] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_PAL, .x_offset = -1, .y_offset = -1, .model_string = "Miro PCTV USB", }, [PINNA_PCTV_USB_NTSC_FM] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = -1, .model_string = "Pinnacle Studio PCTV USB (NTSC) FM", }, [PINNA_PCTV_USB_NTSC_FM_V3] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = -1, .model_string = "Pinnacle Studio PCTV USB (NTSC) FM V3", }, [PINNA_PCTV_USB_PAL_FM_V2] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_TEMIC_4009FR5_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle Studio PCTV USB (PAL) FM V2", }, [PINNA_PCTV_USB_NTSC_FM_V2] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_TEMIC_4039FR5_NTSC, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, 
.model_string = "Pinnacle Studio PCTV USB (NTSC) FM V2", }, [PINNA_PCTV_USB_PAL_FM_V3] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_TEMIC_4009FR5_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle Studio PCTV USB (PAL) FM V3", }, [PINNA_LINX_VD_IN_CAB_NTSC] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle Studio Linx Video input cable (NTSC)", }, [PINNA_LINX_VD_IN_CAB_PAL] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 2, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 0, .tuner_type = 0, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle Studio Linx Video input cable (PAL)", }, [PINNA_PCTV_BUNGEE_PAL_FM] = { .interface = -1, .codec = CODEC_SAA7113, .video_channels = 3, .video_norm = V4L2_STD_PAL, .audio_channels = 1, .radio = 1, .vbi = 1, .tuner = 1, .tuner_type = TUNER_TEMIC_4009FR5_PAL, .x_offset = 0, .y_offset = 3, .dvi_yuv_override = 1, .dvi_yuv = 7, .model_string = "Pinnacle PCTV Bungee USB (PAL) FM", }, [HPG_WINTV] = { .interface = -1, .codec = CODEC_SAA7111, .video_channels = 3, .video_norm = V4L2_STD_NTSC, .audio_channels = 1, .radio = 0, .vbi = 1, .tuner = 1, .tuner_type = TUNER_PHILIPS_NTSC_M, .x_offset = -1, .y_offset = -1, .model_string = "Hauppauge WinTv-USB", }, [MICROCAM_NTSC] = { .interface = -1, .codec = CODEC_WEBCAM, .video_channels = 1, .video_norm = V4L2_STD_NTSC, .audio_channels = 0, .radio = 0, .vbi = 0, .tuner = 0, .tuner_type = 0, .x_offset = 71, .y_offset = 15, .model_string = "Nogatech USB MicroCam NTSC (NV3000N)", }, [MICROCAM_PAL] = { .interface = -1, .codec = CODEC_WEBCAM, 
.video_channels = 1, .video_norm = V4L2_STD_PAL, .audio_channels = 0, .radio = 0, .vbi = 0, .tuner = 0, .tuner_type = 0, .x_offset = 71, .y_offset = 18, .model_string = "Nogatech USB MicroCam PAL (NV3001P)", }, }; const int usbvision_device_data_size = ARRAY_SIZE(usbvision_device_data); /* Supported Devices */ struct usb_device_id usbvision_table[] = { { USB_DEVICE(0x0a6f, 0x0400), .driver_info = XANBOO }, { USB_DEVICE(0x050d, 0x0106), .driver_info = BELKIN_VIDEOBUS_II }, { USB_DEVICE(0x050d, 0x0207), .driver_info = BELKIN_VIDEOBUS }, { USB_DEVICE(0x050d, 0x0208), .driver_info = BELKIN_USB_VIDEOBUS_II }, { USB_DEVICE(0x0571, 0x0002), .driver_info = ECHOFX_INTERVIEW_LITE }, { USB_DEVICE(0x0573, 0x0003), .driver_info = USBGEAR_USBG_V1 }, { USB_DEVICE(0x0573, 0x0400), .driver_info = D_LINK_V100 }, { USB_DEVICE(0x0573, 0x2000), .driver_info = X10_USB_CAMERA }, { USB_DEVICE(0x0573, 0x2d00), .driver_info = HPG_WINTV_LIVE_PAL_BG }, { USB_DEVICE(0x0573, 0x2d01), .driver_info = HPG_WINTV_LIVE_PRO_NTSC_MN }, { USB_DEVICE(0x0573, 0x2101), .driver_info = ZORAN_PMD_NOGATECH }, { USB_DEVICE(0x0573, 0x3000), .driver_info = MICROCAM_NTSC }, { USB_DEVICE(0x0573, 0x3001), .driver_info = MICROCAM_PAL }, { USB_DEVICE(0x0573, 0x4100), .driver_info = NOGATECH_USB_TV_NTSC_FM }, { USB_DEVICE(0x0573, 0x4110), .driver_info = PNY_USB_TV_NTSC_FM }, { USB_DEVICE(0x0573, 0x4450), .driver_info = PV_PLAYTV_USB_PRO_PAL_FM }, { USB_DEVICE(0x0573, 0x4550), .driver_info = ZT_721 }, { USB_DEVICE(0x0573, 0x4d00), .driver_info = HPG_WINTV_NTSC_MN }, { USB_DEVICE(0x0573, 0x4d01), .driver_info = HPG_WINTV_PAL_BG }, { USB_DEVICE(0x0573, 0x4d02), .driver_info = HPG_WINTV_PAL_I }, { USB_DEVICE(0x0573, 0x4d03), .driver_info = HPG_WINTV_PAL_SECAM_L }, { USB_DEVICE(0x0573, 0x4d04), .driver_info = HPG_WINTV_PAL_D_K }, { USB_DEVICE(0x0573, 0x4d10), .driver_info = HPG_WINTV_NTSC_FM }, { USB_DEVICE(0x0573, 0x4d11), .driver_info = HPG_WINTV_PAL_BG_FM }, { USB_DEVICE(0x0573, 0x4d12), .driver_info = HPG_WINTV_PAL_I_FM 
}, { USB_DEVICE(0x0573, 0x4d14), .driver_info = HPG_WINTV_PAL_D_K_FM }, { USB_DEVICE(0x0573, 0x4d2a), .driver_info = HPG_WINTV_PRO_NTSC_MN }, { USB_DEVICE(0x0573, 0x4d2b), .driver_info = HPG_WINTV_PRO_NTSC_MN_V2 }, { USB_DEVICE(0x0573, 0x4d2c), .driver_info = HPG_WINTV_PRO_PAL }, { USB_DEVICE(0x0573, 0x4d20), .driver_info = HPG_WINTV_PRO_NTSC_MN_V3 }, { USB_DEVICE(0x0573, 0x4d21), .driver_info = HPG_WINTV_PRO_PAL_BG }, { USB_DEVICE(0x0573, 0x4d22), .driver_info = HPG_WINTV_PRO_PAL_I }, { USB_DEVICE(0x0573, 0x4d23), .driver_info = HPG_WINTV_PRO_PAL_SECAM_L }, { USB_DEVICE(0x0573, 0x4d24), .driver_info = HPG_WINTV_PRO_PAL_D_K }, { USB_DEVICE(0x0573, 0x4d25), .driver_info = HPG_WINTV_PRO_PAL_SECAM }, { USB_DEVICE(0x0573, 0x4d26), .driver_info = HPG_WINTV_PRO_PAL_SECAM_V2 }, { USB_DEVICE(0x0573, 0x4d27), .driver_info = HPG_WINTV_PRO_PAL_BG_V2 }, { USB_DEVICE(0x0573, 0x4d28), .driver_info = HPG_WINTV_PRO_PAL_BG_D_K }, { USB_DEVICE(0x0573, 0x4d29), .driver_info = HPG_WINTV_PRO_PAL_I_D_K }, { USB_DEVICE(0x0573, 0x4d30), .driver_info = HPG_WINTV_PRO_NTSC_MN_FM }, { USB_DEVICE(0x0573, 0x4d31), .driver_info = HPG_WINTV_PRO_PAL_BG_FM }, { USB_DEVICE(0x0573, 0x4d32), .driver_info = HPG_WINTV_PRO_PAL_I_FM }, { USB_DEVICE(0x0573, 0x4d34), .driver_info = HPG_WINTV_PRO_PAL_D_K_FM }, { USB_DEVICE(0x0573, 0x4d35), .driver_info = HPG_WINTV_PRO_TEMIC_PAL_FM }, { USB_DEVICE(0x0573, 0x4d36), .driver_info = HPG_WINTV_PRO_TEMIC_PAL_BG_FM }, { USB_DEVICE(0x0573, 0x4d37), .driver_info = HPG_WINTV_PRO_PAL_FM }, { USB_DEVICE(0x0573, 0x4d38), .driver_info = HPG_WINTV_PRO_NTSC_MN_FM_V2 }, { USB_DEVICE(0x0768, 0x0006), .driver_info = CAMTEL_TVB330 }, { USB_DEVICE(0x07d0, 0x0001), .driver_info = DIGITAL_VIDEO_CREATOR_I }, { USB_DEVICE(0x07d0, 0x0002), .driver_info = GLOBAL_VILLAGE_GV_007_NTSC }, { USB_DEVICE(0x07d0, 0x0003), .driver_info = DAZZLE_DVC_50_REV_1_NTSC }, { USB_DEVICE(0x07d0, 0x0004), .driver_info = DAZZLE_DVC_80_REV_1_PAL }, { USB_DEVICE(0x07d0, 0x0005), .driver_info = 
DAZZLE_DVC_90_REV_1_SECAM }, { USB_DEVICE(0x07f8, 0x9104), .driver_info = ESKAPE_LABS_MYTV2GO }, { USB_DEVICE(0x2304, 0x010d), .driver_info = PINNA_PCTV_USB_PAL }, { USB_DEVICE(0x2304, 0x0109), .driver_info = PINNA_PCTV_USB_SECAM }, { USB_DEVICE(0x2304, 0x0110), .driver_info = PINNA_PCTV_USB_PAL_FM }, { USB_DEVICE(0x2304, 0x0111), .driver_info = MIRO_PCTV_USB }, { USB_DEVICE(0x2304, 0x0112), .driver_info = PINNA_PCTV_USB_NTSC_FM }, { USB_DEVICE(0x2304, 0x0113), .driver_info = PINNA_PCTV_USB_NTSC_FM_V3 }, { USB_DEVICE(0x2304, 0x0210), .driver_info = PINNA_PCTV_USB_PAL_FM_V2 }, { USB_DEVICE(0x2304, 0x0212), .driver_info = PINNA_PCTV_USB_NTSC_FM_V2 }, { USB_DEVICE(0x2304, 0x0214), .driver_info = PINNA_PCTV_USB_PAL_FM_V3 }, { USB_DEVICE(0x2304, 0x0300), .driver_info = PINNA_LINX_VD_IN_CAB_NTSC }, { USB_DEVICE(0x2304, 0x0301), .driver_info = PINNA_LINX_VD_IN_CAB_PAL }, { USB_DEVICE(0x2304, 0x0419), .driver_info = PINNA_PCTV_BUNGEE_PAL_FM }, { USB_DEVICE(0x2400, 0x4200), .driver_info = HPG_WINTV }, { }, /* terminate list */ }; MODULE_DEVICE_TABLE(usb, usbvision_table);
gpl-2.0
jazzsir/iamroot-linux-arm10c
drivers/misc/eeprom/digsy_mtc_eeprom.c
10324
2098
/* * EEPROMs access control driver for display configuration EEPROMs * on DigsyMTC board. * * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/spi_gpio.h> #include <linux/eeprom_93xx46.h> #define GPIO_EEPROM_CLK 216 #define GPIO_EEPROM_CS 210 #define GPIO_EEPROM_DI 217 #define GPIO_EEPROM_DO 249 #define GPIO_EEPROM_OE 255 #define EE_SPI_BUS_NUM 1 static void digsy_mtc_op_prepare(void *p) { /* enable */ gpio_set_value(GPIO_EEPROM_OE, 0); } static void digsy_mtc_op_finish(void *p) { /* disable */ gpio_set_value(GPIO_EEPROM_OE, 1); } struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = { .flags = EE_ADDR8, .prepare = digsy_mtc_op_prepare, .finish = digsy_mtc_op_finish, }; static struct spi_gpio_platform_data eeprom_spi_gpio_data = { .sck = GPIO_EEPROM_CLK, .mosi = GPIO_EEPROM_DI, .miso = GPIO_EEPROM_DO, .num_chipselect = 1, }; static struct platform_device digsy_mtc_eeprom = { .name = "spi_gpio", .id = EE_SPI_BUS_NUM, .dev = { .platform_data = &eeprom_spi_gpio_data, }, }; static struct spi_board_info digsy_mtc_eeprom_info[] __initdata = { { .modalias = "93xx46", .max_speed_hz = 1000000, .bus_num = EE_SPI_BUS_NUM, .chip_select = 0, .mode = SPI_MODE_0, .controller_data = (void *)GPIO_EEPROM_CS, .platform_data = &digsy_mtc_eeprom_data, }, }; static int __init digsy_mtc_eeprom_devices_init(void) { int ret; ret = gpio_request_one(GPIO_EEPROM_OE, GPIOF_OUT_INIT_HIGH, "93xx46 EEPROMs OE"); if (ret) { pr_err("can't request gpio %d\n", GPIO_EEPROM_OE); return ret; } spi_register_board_info(digsy_mtc_eeprom_info, ARRAY_SIZE(digsy_mtc_eeprom_info)); return platform_device_register(&digsy_mtc_eeprom); } 
device_initcall(digsy_mtc_eeprom_devices_init);
gpl-2.0
siis/pfwall
drivers/zorro/names.c
12628
2555
/*
 *	Zorro Device Name Tables
 *
 *	Copyright (C) 1999--2000 Geert Uytterhoeven
 *
 *	Based on the PCI version:
 *
 *	Copyright 1992--1999 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang, Martin Mares
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/zorro.h>

#ifdef CONFIG_ZORRO_NAMES

/* One known product of a manufacturer; 'seen' counts boards found so far. */
struct zorro_prod_info {
	__u16 prod;
	unsigned short seen;
	const char *name;
};

/* A manufacturer with its 'nr'-entry product table. */
struct zorro_manuf_info {
	__u16 manuf;
	unsigned short nr;
	const char *name;
	struct zorro_prod_info *prods;
};

/*
 * This is ridiculous, but we want the strings in
 * the .init section so that they don't take up
 * real memory.. Parse the same file multiple times
 * to get all the info.
 */
/*
 * Pass 1: emit one __initdata char array per manufacturer and product
 * string.  (devlist.h is expected to consume/reset these macros between
 * inclusions — TODO confirm against devlist.h, which is not visible here.)
 */
#define MANUF( manuf, name ) static char __manufstr_##manuf[] __initdata = name;
#define ENDMANUF()
#define PRODUCT( manuf, prod, name ) static char __prodstr_##manuf##prod[] __initdata = name;

#include "devlist.h"

/* Pass 2: emit the per-manufacturer product tables. */
#define MANUF( manuf, name ) static struct zorro_prod_info __prods_##manuf[] __initdata = {
#define ENDMANUF() };
#define PRODUCT( manuf, prod, name ) { 0x##prod, 0, __prodstr_##manuf##prod },

#include "devlist.h"

/* Pass 3: emit the master manufacturer list tying it all together. */
static struct zorro_manuf_info __initdata zorro_manuf_list[] = {
#define MANUF( manuf, name ) { 0x##manuf, sizeof(__prods_##manuf) / sizeof(struct zorro_prod_info), __manufstr_##manuf, __prods_##manuf },
#define ENDMANUF()
#define PRODUCT( manuf, prod, name )

#include "devlist.h"
};

#define MANUFS (sizeof(zorro_manuf_list)/sizeof(struct zorro_manuf_info))

/*
 * Format a human-readable name for 'dev' into dev->name, based on the
 * tables above.  Falls back to "Zorro device %08x" when the manufacturer
 * (or product) is unknown, and appends " (#N)" when the same product has
 * been seen more than once.
 * NOTE(review): writes via sprintf() — assumes dev->name is large enough
 * for the longest table entry; confirm against struct zorro_dev.
 */
void __init zorro_name_device(struct zorro_dev *dev)
{
	const struct zorro_manuf_info *manuf_p = zorro_manuf_list;
	int i = MANUFS;
	char *name = dev->name;

	do {
		if (manuf_p->manuf == ZORRO_MANUF(dev->id))
			goto match_manuf;
		manuf_p++;
	} while (--i);

	/* Couldn't find either the manufacturer nor the product */
	sprintf(name, "Zorro device %08x", dev->id);
	return;

match_manuf:
	{
		struct zorro_prod_info *prod_p = manuf_p->prods;
		int i = manuf_p->nr;

		/* Product key is (product id << 8) | extended product code. */
		while (i > 0) {
			if (prod_p->prod ==
			    ((ZORRO_PROD(dev->id)<<8) | ZORRO_EPC(dev->id)))
				goto match_prod;
			prod_p++;
			i--;
		}

		/* Ok, found the manufacturer, but unknown product */
		sprintf(name, "Zorro device %08x (%s)", dev->id, manuf_p->name);
		return;

		/* Full match */
	match_prod:
		{
			char *n = name + sprintf(name, "%s %s",
						 manuf_p->name, prod_p->name);
			int nr = prod_p->seen + 1;

			/* Count duplicates and tag the 2nd, 3rd, ... boards. */
			prod_p->seen = nr;
			if (nr > 1)
				sprintf(n, " (#%d)", nr);
		}
	}
}

#else

void __init zorro_name_device(struct zorro_dev *dev)
{
}

#endif
gpl-2.0
SaberMod/android_kernel_lge_hammerhead-sts
arch/x86/math-emu/poly_sin.c
14420
10848
/*---------------------------------------------------------------------------+
 |  poly_sin.c                                                               |
 |                                                                           |
 | Computation of an approximation of the sin function and the cosine        |
 | function by a polynomial.                                                 |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1997,1999                                    |
 |                  W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
 |                  E-mail   billm@melbpc.org.au                             |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "control_w.h"
#include "poly.h"

/*
 * Fixed-point polynomial coefficients.  The *_l tables serve the
 * small-argument branch, the *_h tables the large-argument branch;
 * positive and negative terms are accumulated separately and then
 * combined (the accumulation code below negates between the passes).
 */
#define	N_COEFF_P	4
#define	N_COEFF_N	4

static const unsigned long long pos_terms_l[N_COEFF_P] = {
	0xaaaaaaaaaaaaaaabLL,
	0x00d00d00d00cf906LL,
	0x000006b99159a8bbLL,
	0x000000000d7392e6LL
};

static const unsigned long long neg_terms_l[N_COEFF_N] = {
	0x2222222222222167LL,
	0x0002e3bc74aab624LL,
	0x0000000b09229062LL,
	0x00000000000c7973LL
};

#define	N_COEFF_PH	4
#define	N_COEFF_NH	4
static const unsigned long long pos_terms_h[N_COEFF_PH] = {
	0x0000000000000000LL,
	0x05b05b05b05b0406LL,
	0x000049f93edd91a9LL,
	0x00000000c9c9ed62LL
};

static const unsigned long long neg_terms_h[N_COEFF_NH] = {
	0xaaaaaaaaaaaaaa98LL,
	0x001a01a01a019064LL,
	0x0000008f76c68a77LL,
	0x0000000000d58f5eLL
};

/*--- poly_sine() -----------------------------------------------------------+
 | Replace st(0) by an approximation of sin(st(0)).  The sign of the result  |
 | is copied from the argument; the magnitude is computed by one of two      |
 | polynomial branches split at approx 0.88309101259.                        |
 +---------------------------------------------------------------------------*/
void poly_sine(FPU_REG *st0_ptr)
{
	int exponent, echange;
	Xsig accumulator, argSqrd, argTo4;
	unsigned long fix_up, adj;
	unsigned long long fixed_arg;
	FPU_REG result;

	exponent = exponent(st0_ptr);

	accumulator.lsw = accumulator.midw = accumulator.msw = 0;

	/* Split into two ranges, for arguments below and above 1.0 */
	/* The boundary between upper and lower is approx 0.88309101259 */
	if ((exponent < -1)
	    || ((exponent == -1) && (st0_ptr->sigh <= 0xe21240aa))) {
		/* The argument is <= 0.88309101259 */

		argSqrd.msw = st0_ptr->sigh;
		argSqrd.midw = st0_ptr->sigl;
		argSqrd.lsw = 0;
		mul64_Xsig(&argSqrd, &significand(st0_ptr));
		/* Align the binary point of arg^2 for exponent < -1. */
		shr_Xsig(&argSqrd, 2 * (-1 - exponent));
		argTo4.msw = argSqrd.msw;
		argTo4.midw = argSqrd.midw;
		argTo4.lsw = argSqrd.lsw;
		mul_Xsig_Xsig(&argTo4, &argTo4);

		/* Even powers via arg^4; negative terms first, then positive. */
		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
				N_COEFF_N - 1);
		mul_Xsig_Xsig(&accumulator, &argSqrd);
		negate_Xsig(&accumulator);

		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
				N_COEFF_P - 1);

		shr_Xsig(&accumulator, 2);	/* Divide by four */
		accumulator.msw |= 0x80000000;	/* Add 1.0 */

		mul64_Xsig(&accumulator, &significand(st0_ptr));
		mul64_Xsig(&accumulator, &significand(st0_ptr));
		mul64_Xsig(&accumulator, &significand(st0_ptr));

		/* Divide by four, FPU_REG compatible, etc */
		exponent = 3 * exponent;

		/* The minimum exponent difference is 3 */
		shr_Xsig(&accumulator, exponent(st0_ptr) - exponent);

		negate_Xsig(&accumulator);
		XSIG_LL(accumulator) += significand(st0_ptr);

		echange = round_Xsig(&accumulator);

		setexponentpos(&result, exponent(st0_ptr) + echange);
	} else {
		/* The argument is > 0.88309101259 */
		/* We use sin(st(0)) = cos(pi/2-st(0)) */

		fixed_arg = significand(st0_ptr);

		if (exponent == 0) {
			/* The argument is >= 1.0 */
			/* Put the binary point at the left. */
			fixed_arg <<= 1;
		}
		/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
		fixed_arg = 0x921fb54442d18469LL - fixed_arg;
		/* There is a special case which arises due to rounding, to fix
		   here. */
		if (fixed_arg == 0xffffffffffffffffLL)
			fixed_arg = 0;

		XSIG_LL(argSqrd) = fixed_arg;
		argSqrd.lsw = 0;
		mul64_Xsig(&argSqrd, &fixed_arg);

		XSIG_LL(argTo4) = XSIG_LL(argSqrd);
		argTo4.lsw = argSqrd.lsw;
		mul_Xsig_Xsig(&argTo4, &argTo4);

		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
				N_COEFF_NH - 1);
		mul_Xsig_Xsig(&accumulator, &argSqrd);
		negate_Xsig(&accumulator);

		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
				N_COEFF_PH - 1);
		negate_Xsig(&accumulator);

		mul64_Xsig(&accumulator, &fixed_arg);
		mul64_Xsig(&accumulator, &fixed_arg);

		shr_Xsig(&accumulator, 3);
		negate_Xsig(&accumulator);

		add_Xsig_Xsig(&accumulator, &argSqrd);

		shr_Xsig(&accumulator, 1);

		accumulator.lsw |= 1;	/* A zero accumulator here would cause
					   problems */
		negate_Xsig(&accumulator);

		/* The basic computation is complete. Now fix the answer to
		   compensate for the error due to the approximation used for
		   pi/2 */

		/* This has an exponent of -65 */
		fix_up = 0x898cc517;
		/* The fix-up needs to be improved for larger args */
		if (argSqrd.msw & 0xffc00000) {
			/* Get about 32 bit precision in these: */
			fix_up -= mul_32_32(0x898cc517, argSqrd.msw) / 6;
		}
		fix_up = mul_32_32(fix_up, LL_MSW(fixed_arg));

		/* 96-bit subtract of fix_up with manual borrow propagation. */
		adj = accumulator.lsw;	/* temp save */
		accumulator.lsw -= fix_up;
		if (accumulator.lsw > adj)
			XSIG_LL(accumulator)--;

		echange = round_Xsig(&accumulator);

		setexponentpos(&result, echange - 1);
	}

	significand(&result) = XSIG_LL(accumulator);
	setsign(&result, getsign(st0_ptr));
	FPU_copy_to_reg0(&result, TAG_Valid);

#ifdef PARANOID
	if ((exponent(&result) >= 0)
	    && (significand(&result) > 0x8000000000000000LL)) {
		EXCEPTION(EX_INTERNAL | 0x150);
	}
#endif /* PARANOID */
}

/*--- poly_cos() ------------------------------------------------------------+
 | Replace st(0) by an approximation of cos(st(0)).  The PARANOID guard      |
 | shows the expected argument range: 0 <= st(0) <= pi/2.                    |
 +---------------------------------------------------------------------------*/
void poly_cos(FPU_REG *st0_ptr)
{
	FPU_REG result;
	long int exponent, exp2, echange;
	Xsig accumulator, argSqrd, fix_up, argTo4;
	unsigned long long fixed_arg;

#ifdef PARANOID
	if ((exponent(st0_ptr) > 0)
	    || ((exponent(st0_ptr) == 0)
		&& (significand(st0_ptr) > 0xc90fdaa22168c234LL))) {
		EXCEPTION(EX_Invalid);
		FPU_copy_to_reg0(&CONST_QNaN, TAG_Special);
		return;
	}
#endif /* PARANOID */

	exponent = exponent(st0_ptr);

	accumulator.lsw = accumulator.midw = accumulator.msw = 0;

	if ((exponent < -1)
	    || ((exponent == -1) && (st0_ptr->sigh <= 0xb00d6f54))) {
		/* arg is < 0.687705 */

		argSqrd.msw = st0_ptr->sigh;
		argSqrd.midw = st0_ptr->sigl;
		argSqrd.lsw = 0;
		mul64_Xsig(&argSqrd, &significand(st0_ptr));

		if (exponent < -1) {
			/* shift the argument right by the required places */
			shr_Xsig(&argSqrd, 2 * (-1 - exponent));
		}

		argTo4.msw = argSqrd.msw;
		argTo4.midw = argSqrd.midw;
		argTo4.lsw = argSqrd.lsw;
		mul_Xsig_Xsig(&argTo4, &argTo4);

		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_h,
				N_COEFF_NH - 1);
		mul_Xsig_Xsig(&accumulator, &argSqrd);
		negate_Xsig(&accumulator);

		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_h,
				N_COEFF_PH - 1);
		negate_Xsig(&accumulator);

		mul64_Xsig(&accumulator, &significand(st0_ptr));
		mul64_Xsig(&accumulator, &significand(st0_ptr));
		shr_Xsig(&accumulator, -2 * (1 + exponent));

		shr_Xsig(&accumulator, 3);
		negate_Xsig(&accumulator);

		add_Xsig_Xsig(&accumulator, &argSqrd);

		shr_Xsig(&accumulator, 1);

		/* It doesn't matter if accumulator is all zero here, the
		   following code will work ok */
		negate_Xsig(&accumulator);

		/* Round up on a set lsw high bit before dropping to 64 bits. */
		if (accumulator.lsw & 0x80000000)
			XSIG_LL(accumulator)++;
		if (accumulator.msw == 0) {
			/* The result is 1.0 */
			FPU_copy_to_reg0(&CONST_1, TAG_Valid);
			return;
		} else {
			significand(&result) = XSIG_LL(accumulator);

			/* will be a valid positive nr with expon = -1 */
			setexponentpos(&result, -1);
		}
	} else {
		/* Larger args: compute via cos(x) = sin(pi/2 - x). */
		fixed_arg = significand(st0_ptr);

		if (exponent == 0) {
			/* The argument is >= 1.0 */
			/* Put the binary point at the left. */
			fixed_arg <<= 1;
		}
		/* pi/2 in hex is: 1.921fb54442d18469 898CC51701B839A2 52049C1 */
		fixed_arg = 0x921fb54442d18469LL - fixed_arg;
		/* There is a special case which arises due to rounding, to fix
		   here. */
		if (fixed_arg == 0xffffffffffffffffLL)
			fixed_arg = 0;

		exponent = -1;
		exp2 = -1;

		/* A shift is needed here only for a narrow range of arguments,
		   i.e. for fixed_arg approx 2^-32, but we pick up more... */
		if (!(LL_MSW(fixed_arg) & 0xffff0000)) {
			fixed_arg <<= 16;
			exponent -= 16;
			exp2 -= 16;
		}

		XSIG_LL(argSqrd) = fixed_arg;
		argSqrd.lsw = 0;
		mul64_Xsig(&argSqrd, &fixed_arg);

		if (exponent < -1) {
			/* shift the argument right by the required places */
			shr_Xsig(&argSqrd, 2 * (-1 - exponent));
		}

		argTo4.msw = argSqrd.msw;
		argTo4.midw = argSqrd.midw;
		argTo4.lsw = argSqrd.lsw;
		mul_Xsig_Xsig(&argTo4, &argTo4);

		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), neg_terms_l,
				N_COEFF_N - 1);
		mul_Xsig_Xsig(&accumulator, &argSqrd);
		negate_Xsig(&accumulator);

		polynomial_Xsig(&accumulator, &XSIG_LL(argTo4), pos_terms_l,
				N_COEFF_P - 1);

		shr_Xsig(&accumulator, 2);	/* Divide by four */
		accumulator.msw |= 0x80000000;	/* Add 1.0 */

		mul64_Xsig(&accumulator, &fixed_arg);
		mul64_Xsig(&accumulator, &fixed_arg);
		mul64_Xsig(&accumulator, &fixed_arg);

		/* Divide by four, FPU_REG compatible, etc */
		exponent = 3 * exponent;

		/* The minimum exponent difference is 3 */
		shr_Xsig(&accumulator, exp2 - exponent);

		negate_Xsig(&accumulator);
		XSIG_LL(accumulator) += fixed_arg;

		/* The basic computation is complete. Now fix the answer to
		   compensate for the error due to the approximation used for
		   pi/2 */

		/* This has an exponent of -65 */
		XSIG_LL(fix_up) = 0x898cc51701b839a2ll;
		fix_up.lsw = 0;

		/* The fix-up needs to be improved for larger args */
		if (argSqrd.msw & 0xffc00000) {
			/* Get about 32 bit precision in these: */
			fix_up.msw -= mul_32_32(0x898cc517, argSqrd.msw) / 2;
			fix_up.msw += mul_32_32(0x898cc517, argTo4.msw) / 24;
		}

		exp2 += norm_Xsig(&accumulator);
		shr_Xsig(&accumulator, 1);	/* Prevent overflow */
		exp2++;
		shr_Xsig(&fix_up, 65 + exp2);

		add_Xsig_Xsig(&accumulator, &fix_up);

		echange = round_Xsig(&accumulator);

		setexponentpos(&result, exp2 + echange);
		significand(&result) = XSIG_LL(accumulator);
	}

	FPU_copy_to_reg0(&result, TAG_Valid);

#ifdef PARANOID
	if ((exponent(&result) >= 0)
	    && (significand(&result) > 0x8000000000000000LL)) {
		EXCEPTION(EX_INTERNAL | 0x151);
	}
#endif /* PARANOID */
}
gpl-2.0
openwrt/bcm63xx-next
arch/x86/math-emu/poly_atan.c
14420
6375
/*---------------------------------------------------------------------------+
 |  poly_atan.c                                                              |
 |                                                                           |
 | Compute the arctan of a FPU_REG, using a polynomial approximation.        |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1997                                         |
 |                  W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
 |                  E-mail   billm@suburbia.net                              |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "status_w.h"
#include "control_w.h"
#include "poly.h"

/* Fixed-point coefficients for the odd polynomial, split into negative
   and positive terms (accumulated in two passes below). */
#define	HIPOWERon	6	/* odd poly, negative terms */
static const unsigned long long oddnegterms[HIPOWERon] = {
	0x0000000000000000LL,	/* Dummy (not for - 1.0) */
	0x015328437f756467LL,
	0x0005dda27b73dec6LL,
	0x0000226bf2bfb91aLL,
	0x000000ccc439c5f7LL,
	0x0000000355438407LL
};

#define	HIPOWERop	6	/* odd poly, positive terms */
static const unsigned long long oddplterms[HIPOWERop] = {
/*	0xaaaaaaaaaaaaaaabLL,  transferred to fixedpterm[] */
	0x0db55a71875c9ac2LL,
	0x0029fce2d67880b0LL,
	0x0000dfd3908b4596LL,
	0x00000550fd61dab4LL,
	0x0000001c9422b3f9LL,
	0x000000003e3301e1LL
};

static const unsigned long long denomterm = 0xebd9b842c5c53a0eLL;

static const Xsig fixedpterm = MK_XSIG(0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa);

/* Significand of pi, reused for the pi/4, pi/2 and pi corrections below. */
static const Xsig pi_signif = MK_XSIG(0xc90fdaa2, 0x2168c234, 0xc4c6628b);

/*--- poly_atan() -----------------------------------------------------------+
 | Compute atan(st(1)/st(0)) into st(1) (FPATAN semantics).  The ratio is    |
 | reduced into [0, sqrt(2)-1] via the identities atan(1/x) = pi/2 - atan(x) |
 | and atan((1-x)/(1+x)) = pi/4 - atan(x), evaluated by the odd polynomial,  |
 | then unwound (quadrant corrections) in reverse order.                     |
 +---------------------------------------------------------------------------*/
void poly_atan(FPU_REG *st0_ptr, u_char st0_tag,
	       FPU_REG *st1_ptr, u_char st1_tag)
{
	u_char transformed, inverted, sign1, sign2;
	int exponent;
	long int dummy_exp;
	Xsig accumulator, Numer, Denom, accumulatore, argSignif, argSq, argSqSq;
	u_char tag;

	sign1 = getsign(st0_ptr);
	sign2 = getsign(st1_ptr);
	if (st0_tag == TAG_Valid) {
		exponent = exponent(st0_ptr);
	} else {
		/* This gives non-compatible stack contents... */
		FPU_to_exp16(st0_ptr, st0_ptr);
		exponent = exponent16(st0_ptr);
	}
	if (st1_tag == TAG_Valid) {
		exponent -= exponent(st1_ptr);
	} else {
		/* This gives non-compatible stack contents... */
		FPU_to_exp16(st1_ptr, st1_ptr);
		exponent -= exponent16(st1_ptr);
	}

	/* Arrange Numer/Denom so that the ratio is <= 1 ('inverted' records
	   whether the operands were swapped). */
	if ((exponent < 0) || ((exponent == 0) &&
			       ((st0_ptr->sigh < st1_ptr->sigh) ||
				((st0_ptr->sigh == st1_ptr->sigh) &&
				 (st0_ptr->sigl < st1_ptr->sigl))))) {
		inverted = 1;
		Numer.lsw = Denom.lsw = 0;
		XSIG_LL(Numer) = significand(st0_ptr);
		XSIG_LL(Denom) = significand(st1_ptr);
	} else {
		inverted = 0;
		exponent = -exponent;
		Numer.lsw = Denom.lsw = 0;
		XSIG_LL(Numer) = significand(st1_ptr);
		XSIG_LL(Denom) = significand(st0_ptr);
	}
	div_Xsig(&Numer, &Denom, &argSignif);
	exponent += norm_Xsig(&argSignif);

	if ((exponent >= -1) || ((exponent == -2) &&
				 (argSignif.msw > 0xd413ccd0))) {
		/* The argument is greater than sqrt(2)-1 (=0.414213562...) */
		/* Convert the argument by an identity for atan */
		transformed = 1;

		if (exponent >= 0) {
#ifdef PARANOID
			if (!((exponent == 0) &&
			      (argSignif.lsw == 0) && (argSignif.midw == 0) &&
			      (argSignif.msw == 0x80000000))) {
				EXCEPTION(EX_INTERNAL | 0x104);	/* There must be a logic error */
				return;
			}
#endif /* PARANOID */
			argSignif.msw = 0;	/* Make the transformed arg -> 0.0 */
		} else {
			/* Compute (1-arg)/(1+arg) as the new argument. */
			Numer.lsw = Denom.lsw = argSignif.lsw;
			XSIG_LL(Numer) = XSIG_LL(Denom) = XSIG_LL(argSignif);

			if (exponent < -1)
				shr_Xsig(&Numer, -1 - exponent);
			negate_Xsig(&Numer);

			shr_Xsig(&Denom, -exponent);
			Denom.msw |= 0x80000000;

			div_Xsig(&Numer, &Denom, &argSignif);

			exponent = -1 + norm_Xsig(&argSignif);
		}
	} else {
		transformed = 0;
	}

	argSq.lsw = argSignif.lsw;
	argSq.midw = argSignif.midw;
	argSq.msw = argSignif.msw;
	mul_Xsig_Xsig(&argSq, &argSq);

	argSqSq.lsw = argSq.lsw;
	argSqSq.midw = argSq.midw;
	argSqSq.msw = argSq.msw;
	mul_Xsig_Xsig(&argSqSq, &argSqSq);

	accumulatore.lsw = argSq.lsw;
	XSIG_LL(accumulatore) = XSIG_LL(argSq);

	shr_Xsig(&argSq, 2 * (-1 - exponent - 1));
	shr_Xsig(&argSqSq, 4 * (-1 - exponent - 1));

	/* Now have argSq etc with binary point at the left
	   .1xxxxxxxx */

	/* Do the basic fixed point polynomial evaluation */
	accumulator.msw = accumulator.midw = accumulator.lsw = 0;
	polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq),
			oddplterms, HIPOWERop - 1);
	mul64_Xsig(&accumulator, &XSIG_LL(argSq));
	negate_Xsig(&accumulator);
	polynomial_Xsig(&accumulator, &XSIG_LL(argSqSq), oddnegterms,
			HIPOWERon - 1);
	negate_Xsig(&accumulator);
	add_two_Xsig(&accumulator, &fixedpterm, &dummy_exp);

	/* Build the denominator of the rational approximation. */
	mul64_Xsig(&accumulatore, &denomterm);
	shr_Xsig(&accumulatore, 1 + 2 * (-1 - exponent));
	accumulatore.msw |= 0x80000000;

	div_Xsig(&accumulator, &accumulatore, &accumulator);

	mul_Xsig_Xsig(&accumulator, &argSignif);
	mul_Xsig_Xsig(&accumulator, &argSq);

	shr_Xsig(&accumulator, 3);
	negate_Xsig(&accumulator);
	add_Xsig_Xsig(&accumulator, &argSignif);

	/* Undo the range reductions, in reverse order of application. */
	if (transformed) {
		/* compute pi/4 - accumulator */
		shr_Xsig(&accumulator, -1 - exponent);
		negate_Xsig(&accumulator);
		add_Xsig_Xsig(&accumulator, &pi_signif);
		exponent = -1;
	}

	if (inverted) {
		/* compute pi/2 - accumulator */
		shr_Xsig(&accumulator, -exponent);
		negate_Xsig(&accumulator);
		add_Xsig_Xsig(&accumulator, &pi_signif);
		exponent = 0;
	}

	if (sign1) {
		/* compute pi - accumulator */
		shr_Xsig(&accumulator, 1 - exponent);
		negate_Xsig(&accumulator);
		add_Xsig_Xsig(&accumulator, &pi_signif);
		exponent = 1;
	}

	exponent += round_Xsig(&accumulator);

	significand(st1_ptr) = XSIG_LL(accumulator);
	setexponent16(st1_ptr, exponent);

	tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign2);
	FPU_settagi(1, tag);

	set_precision_flag_up();	/* We do not really know if up or down,
					   use this as the default. */
}
gpl-2.0
arjen75/icecold-kernel
arch/x86/math-emu/poly_l2.c
14420
7242
/*---------------------------------------------------------------------------+
 |  poly_l2.c                                                                |
 |                                                                           |
 | Compute the base 2 log of a FPU_REG, using a polynomial approximation.    |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1997                                         |
 |                  W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
 |                  E-mail   billm@suburbia.net                              |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "control_w.h"
#include "poly.h"

static void log2_kernel(FPU_REG const *arg, u_char argsign,
			Xsig *accum_result, long int *expon);

/*--- poly_l2() -------------------------------------------------------------+
 |   Base 2 logarithm by a polynomial approximation.                         |
 |   Computes st(1) * log2(st(0)) into st(1) (FYL2X semantics).              |
 +---------------------------------------------------------------------------*/
void poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign)
{
	long int exponent, expon, expon_expon;
	Xsig accumulator, expon_accum, yaccum;
	u_char sign, argsign;
	FPU_REG x;
	int tag;

	exponent = exponent16(st0_ptr);

	/* From st0_ptr, make a number > sqrt(2)/2 and < sqrt(2) */
	if (st0_ptr->sigh > (unsigned)0xb504f334) {
		/* Treat as  sqrt(2)/2 < st0_ptr < 1 */
		significand(&x) = -significand(st0_ptr);
		setexponent16(&x, -1);
		exponent++;
		argsign = SIGN_NEG;
	} else {
		/* Treat as  1 <= st0_ptr < sqrt(2) */
		x.sigh = st0_ptr->sigh - 0x80000000;
		x.sigl = st0_ptr->sigl;
		setexponent16(&x, 0);
		argsign = SIGN_POS;
	}
	tag = FPU_normalize_nuo(&x);

	if (tag == TAG_Zero) {
		/* Reduced argument is exactly a power of two: log2 fraction
		   part is zero. */
		expon = 0;
		accumulator.msw = accumulator.midw = accumulator.lsw = 0;
	} else {
		log2_kernel(&x, argsign, &accumulator, &expon);
	}

	/* Combine the integer exponent part with the fractional log. */
	if (exponent < 0) {
		sign = SIGN_NEG;
		exponent = -exponent;
	} else
		sign = SIGN_POS;
	expon_accum.msw = exponent;
	expon_accum.midw = expon_accum.lsw = 0;
	if (exponent) {
		expon_expon = 31 + norm_Xsig(&expon_accum);
		shr_Xsig(&accumulator, expon_expon - expon);

		if (sign ^ argsign)
			negate_Xsig(&accumulator);
		add_Xsig_Xsig(&accumulator, &expon_accum);
	} else {
		expon_expon = expon;
		sign = argsign;
	}

	yaccum.lsw = 0;
	XSIG_LL(yaccum) = significand(st1_ptr);
	mul_Xsig_Xsig(&accumulator, &yaccum);

	expon_expon += round_Xsig(&accumulator);

	if (accumulator.msw == 0) {
		FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
		return;
	}

	significand(st1_ptr) = XSIG_LL(accumulator);
	setexponent16(st1_ptr, expon_expon + exponent16(st1_ptr) + 1);

	tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign ^ st1_sign);
	FPU_settagi(1, tag);

	set_precision_flag_up();	/* 80486 appears to always do this */

	return;
}

/*--- poly_l2p1() -----------------------------------------------------------+
 |   Base 2 logarithm by a polynomial approximation.                         |
 |   log2(x+1), i.e. st(1) * log2(st(0)+1) -> dest (FYL2XP1 semantics).      |
 |   Returns 0 on success, 1 if an unmasked invalid exception was raised.    |
 +---------------------------------------------------------------------------*/
int poly_l2p1(u_char sign0, u_char sign1,
	      FPU_REG *st0_ptr, FPU_REG *st1_ptr, FPU_REG *dest)
{
	u_char tag;
	long int exponent;
	Xsig accumulator, yaccum;

	if (exponent16(st0_ptr) < 0) {
		/* |st(0)| < 1: within the valid range for the kernel. */
		log2_kernel(st0_ptr, sign0, &accumulator, &exponent);

		yaccum.lsw = 0;
		XSIG_LL(yaccum) = significand(st1_ptr);
		mul_Xsig_Xsig(&accumulator, &yaccum);

		exponent += round_Xsig(&accumulator);

		exponent += exponent16(st1_ptr) + 1;
		if (exponent < EXP_WAY_UNDER)
			exponent = EXP_WAY_UNDER;

		significand(dest) = XSIG_LL(accumulator);
		setexponent16(dest, exponent);

		tag = FPU_round(dest, 1, 0, FULL_PRECISION, sign0 ^ sign1);
		FPU_settagi(1, tag);

		if (tag == TAG_Valid)
			set_precision_flag_up();	/* 80486 appears to always do
							   this */
	} else {
		/* The magnitude of st0_ptr is far too large. */

		if (sign0 != SIGN_POS) {
			/* Trying to get the log of a negative number. */
#ifdef PECULIAR_486		/* Stupid 80486 doesn't worry about log(negative). */
			changesign(st1_ptr);
#else
			if (arith_invalid(1) < 0)
				return 1;
#endif /* PECULIAR_486 */
		}

		/* 80486 appears to do this */
		if (sign0 == SIGN_NEG)
			set_precision_flag_down();
		else
			set_precision_flag_up();
	}

	if (exponent(dest) <= EXP_UNDER)
		EXCEPTION(EX_Underflow);

	return 0;
}

#undef HIPOWER
#define	HIPOWER	10
static const unsigned long long logterms[HIPOWER] = {
	0x2a8eca5705fc2ef0LL,
	0xf6384ee1d01febceLL,
	0x093bb62877cdf642LL,
	0x006985d8a9ec439bLL,
	0x0005212c4f55a9c8LL,
	0x00004326a16927f0LL,
	0x0000038d1d80a0e7LL,
	0x0000003141cc80c6LL,
	0x00000002b1668c9fLL,
	0x000000002c7a46aaLL
};

static const unsigned long leadterm = 0xb8000000;

/*--- log2_kernel() ---------------------------------------------------------+
 |   Base 2 logarithm by a polynomial approximation.                         |
 |   log2(x+1); result significand in *accum_result, exponent in *expon.     |
 +---------------------------------------------------------------------------*/
static void log2_kernel(FPU_REG const *arg, u_char argsign, Xsig *accum_result,
			long int *expon)
{
	long int exponent, adj;
	unsigned long long Xsq;
	Xsig accumulator, Numer, Denom, argSignif, arg_signif;

	exponent = exponent16(arg);
	Numer.lsw = Denom.lsw = 0;
	XSIG_LL(Numer) = XSIG_LL(Denom) = significand(arg);
	/* Form x/(2+x) (argsign == SIGN_POS) or x/(2-x), the variable of the
	   atanh-style series evaluated below. */
	if (argsign == SIGN_POS) {
		shr_Xsig(&Denom, 2 - (1 + exponent));
		Denom.msw |= 0x80000000;
		div_Xsig(&Numer, &Denom, &argSignif);
	} else {
		shr_Xsig(&Denom, 1 - (1 + exponent));
		negate_Xsig(&Denom);
		if (Denom.msw & 0x80000000) {
			div_Xsig(&Numer, &Denom, &argSignif);
			exponent++;
		} else {
			/* Denom must be 1.0 */
			argSignif.lsw = Numer.lsw;
			argSignif.midw = Numer.midw;
			argSignif.msw = Numer.msw;
		}
	}

#ifndef PECULIAR_486
	/* Should check here that  |local_arg|  is within the valid range */
	if (exponent >= -2) {
		if ((exponent > -2) || (argSignif.msw > (unsigned)0xafb0ccc0)) {
			/* The argument is too large */
			/* NOTE(review): intentionally empty — no handling is
			   implemented for out-of-range arguments here. */
		}
	}
#endif /* PECULIAR_486 */

	arg_signif.lsw = argSignif.lsw;
	XSIG_LL(arg_signif) = XSIG_LL(argSignif);
	adj = norm_Xsig(&argSignif);
	accumulator.lsw = argSignif.lsw;
	XSIG_LL(accumulator) = XSIG_LL(argSignif);
	mul_Xsig_Xsig(&accumulator, &accumulator);
	shr_Xsig(&accumulator, 2 * (-1 - (1 + exponent + adj)));
	Xsq = XSIG_LL(accumulator);
	if (accumulator.lsw & 0x80000000)
		Xsq++;

	accumulator.msw = accumulator.midw = accumulator.lsw = 0;
	/* Do the basic fixed point polynomial evaluation */
	polynomial_Xsig(&accumulator, &Xsq, logterms, HIPOWER - 1);

	mul_Xsig_Xsig(&accumulator, &argSignif);
	shr_Xsig(&accumulator, 6 - adj);

	mul32_Xsig(&arg_signif, leadterm);
	add_two_Xsig(&accumulator, &arg_signif, &exponent);

	*expon = exponent + 1;
	accum_result->lsw = accumulator.lsw;
	accum_result->midw = accumulator.midw;
	accum_result->msw = accumulator.msw;
}
gpl-2.0
omnirom/android_kernel_samsung_t1
drivers/video/omap2/dss/wb.c
85
5226
/*
 * linux/drivers/video/omap2/dss/wb.c
 * Copyright (C) 2009 Texas Instruments
 * Author: mythripk <mythripk@ti.com>
 * Some code and ideas taken from drivers/video/omap/ driver
 * by Imre Deak.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "WRITEBACK"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <video/omapdss.h>
#include <plat/cpu.h>

#include "dss.h"

/* All registered writeback instances (populated by dss_init_writeback()). */
static struct list_head wb_list;

/* No sysfs attributes are exposed yet; the show/store ops are stubs. */
static struct attribute *writeback_sysfs_attrs[] = {
	NULL
};

static ssize_t writeback_attr_show(struct kobject *kobj, struct attribute *attr,
		char *buf)
{
	return 0;
}

static ssize_t writeback_attr_store(struct kobject *kobj,
		struct attribute *attr,
		const char *buf, size_t size)
{
	return 0;
}

static const struct sysfs_ops writeback_sysfs_ops = {
	.show = writeback_attr_show,
	.store = writeback_attr_store,
};

static struct kobj_type writeback_ktype = {
	.sysfs_ops = &writeback_sysfs_ops,
	.default_attrs = writeback_sysfs_attrs,
};

/*
 * Return true when the writeback unit described by 'wb' is fed from the
 * given overlay (mem2mem from an overlay), manager (mem2mem from a
 * manager) or display channel (capture mode).
 * NOTE(review): the '(wb->source - 3) == overlayId' test presumably maps
 * OMAP_WB_GFX/VIDx source values onto overlay indices — confirm against
 * the enum ordering in the omapdss headers.
 */
bool omap_dss_check_wb(struct writeback_cache_data *wb, int overlayId,
		int managerId)
{
	bool result = false;
	DSSDBG("ovl=%d,mgr=%d,mode=%s(%s),src=%d\n", overlayId, managerId,
		wb->mode == OMAP_WB_MEM2MEM_MODE ? "mem2mem" : "capture",
		wb->source >= OMAP_WB_GFX ? "overlay" : "manager", wb->source);
	if ((wb->mode == OMAP_WB_MEM2MEM_MODE) &&
			((wb->source - 3) == overlayId))
		result = true;
	else if (wb->mode == OMAP_WB_MEM2MEM_MODE &&
			wb->source < OMAP_WB_GFX && managerId == wb->source) {
		result = true;
	} else if (wb->mode == OMAP_WB_CAPTURE_MODE) {
		switch (wb->source) {
		case OMAP_WB_LCD1:
			if (managerId == OMAP_DSS_CHANNEL_LCD)
				result = true;
			break;
		case OMAP_WB_LCD2:
			if (managerId == OMAP_DSS_CHANNEL_LCD2)
				result = true;
			break;
		case OMAP_WB_TV:
			if (managerId == OMAP_DSS_CHANNEL_DIGIT)
				result = true;
			break;
		case OMAP_WB_GFX:
		case OMAP_WB_VID1:
		case OMAP_WB_VID2:
		case OMAP_WB_VID3:
			/* overlay sources are not valid in capture mode */
			break;
		}
	}
	return result;
}

/*
 * Validation hook installed as wb->check_wb.
 * NOTE(review): only logs and always returns 0 (false) — no actual
 * validation is performed, so the error path in omap_dss_wb_set_info()
 * below is currently dead.
 */
static bool dss_check_wb(struct omap_writeback *wb)
{
	DSSDBG("srctype=%s,src=%d\n",
		wb->info.source >= OMAP_WB_GFX ? "overlay" : "manager",
		wb->info.source);
	return 0;
}

/*
 * Apply new writeback configuration; the old configuration is restored
 * if dss_check_wb() rejects the new one (currently it never does).
 */
static int omap_dss_wb_set_info(struct omap_writeback *wb,
		struct omap_writeback_info *info)
{
	int r;
	struct omap_writeback_info old_info;

	old_info = wb->info;
	wb->info = *info;

	r = dss_check_wb(wb);
	if (r) {
		wb->info = old_info;
		return r;
	}

	wb->info_dirty = true;

	return 0;
}

/*
 * FRAMEDONE_WB interrupt handler: signal the waiter and unregister
 * itself (one-shot — re-armed by omap_dss_wb_register_framedone()).
 */
static void wb_irq_handler(void *data, u32 mask)
{
	complete((struct completion *)data);
	omap_dispc_unregister_isr(wb_irq_handler, (struct completion *)data,
		DISPC_IRQ_FRAMEDONE_WB);
}

/*
 * Block up to 50 ms for the writeback frame-done completion.
 * NOTE(review): wait_for_completion_timeout() returns an unsigned
 * remaining-jiffies count, so the -ERESTARTSYS comparison can never be
 * true here — it looks like dead code carried over from an interruptible
 * variant.
 */
static int omap_dss_wb_wait_framedone(struct omap_writeback *wb)
{
	int timeout = wait_for_completion_timeout(&wb->wb_completion,
		msecs_to_jiffies(50));
	if (timeout == 0)
		return -ETIMEDOUT;
	if (timeout == -ERESTARTSYS)
		return -ERESTARTSYS;
	return 0;
}

/* Re-arm the one-shot frame-done ISR for the next writeback frame. */
static int omap_dss_wb_register_framedone(struct omap_writeback *wb)
{
	INIT_COMPLETION(wb->wb_completion);
	return omap_dispc_register_isr(wb_irq_handler, &wb->wb_completion,
		DISPC_IRQ_FRAMEDONE_WB);
}

static void omap_dss_wb_get_info(struct omap_writeback *wb,
		struct omap_writeback_info *info)
{
	*info = wb->info;
}

/* Return the num'th registered writeback instance, or NULL. */
struct omap_writeback *omap_dss_get_wb(int num)
{
	int i = 0;
	struct omap_writeback *wb;

	list_for_each_entry(wb, &wb_list, list) {
		if (i++ == num)
			return wb;
	}
	return NULL;
}
EXPORT_SYMBOL(omap_dss_get_wb);

static __attribute__ ((unused)) void omap_dss_add_wb(struct omap_writeback *wb)
{
	list_add_tail(&wb->list, &wb_list);
}

/*
 * Allocate and register the single writeback instance ("writeback0")
 * and hook up its operation callbacks.
 * NOTE(review): BUG_ON() on allocation failure — a kzalloc() failure
 * here brings the kernel down instead of failing gracefully.
 */
void dss_init_writeback(struct platform_device *pdev)
{
	int r;
	struct omap_writeback *wb;

	INIT_LIST_HEAD(&wb_list);

	wb = kzalloc(sizeof(*wb), GFP_KERNEL);
	BUG_ON(wb == NULL);
	wb->check_wb = &dss_check_wb;
	wb->set_wb_info = &omap_dss_wb_set_info;
	wb->get_wb_info = &omap_dss_wb_get_info;
	wb->register_framedone = &omap_dss_wb_register_framedone;
	wb->wait_framedone = &omap_dss_wb_wait_framedone;
	mutex_init(&wb->lock);
	init_completion(&wb->wb_completion);
	omap_dss_add_wb(wb);
	r = kobject_init_and_add(&wb->kobj, &writeback_ktype,
		&pdev->dev.kobj, "writeback%d", 0);
	if (r)
		DSSERR("failed to create sysfs file\n");
}

/* Tear down every registered instance: sysfs object, list entry, memory. */
void dss_uninit_writeback(struct platform_device *pdev)
{
	struct omap_writeback *wb;

	while (!list_empty(&wb_list)) {
		wb = list_first_entry(&wb_list, struct omap_writeback, list);
		list_del(&wb->list);
		kobject_del(&wb->kobj);
		kobject_put(&wb->kobj);
		kfree(wb);
	}
}
gpl-2.0
Victor-android/kernel_u8800
fs/ext4/resize.c
341
34901
/* * linux/fs/ext4/resize.c * * Support for resizing an ext4 filesystem while it is mounted. * * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com> * * This could probably be made into a module, because it is not often in use. */ #define EXT4FS_DEBUG #include <linux/errno.h> #include <linux/slab.h> #include "ext4_jbd2.h" #define outside(b, first, last) ((b) < (first) || (b) >= (last)) #define inside(b, first, last) ((b) >= (first) && (b) < (last)) static int verify_group_input(struct super_block *sb, struct ext4_new_group_data *input) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_fsblk_t start = ext4_blocks_count(es); ext4_fsblk_t end = start + input->blocks_count; ext4_group_t group = input->group; ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group; unsigned overhead = ext4_bg_has_super(sb, group) ? (1 + ext4_bg_num_gdb(sb, group) + le16_to_cpu(es->s_reserved_gdt_blocks)) : 0; ext4_fsblk_t metaend = start + overhead; struct buffer_head *bh = NULL; ext4_grpblk_t free_blocks_count, offset; int err = -EINVAL; input->free_blocks_count = free_blocks_count = input->blocks_count - 2 - overhead - sbi->s_itb_per_group; if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks " "(%d free, %u reserved)\n", ext4_bg_has_super(sb, input->group) ? 
"normal" : "no-super", input->group, input->blocks_count, free_blocks_count, input->reserved_blocks); ext4_get_group_no_and_offset(sb, start, NULL, &offset); if (group != sbi->s_groups_count) ext4_warning(sb, __func__, "Cannot add at group %u (only %u groups)", input->group, sbi->s_groups_count); else if (offset != 0) ext4_warning(sb, __func__, "Last group not full"); else if (input->reserved_blocks > input->blocks_count / 5) ext4_warning(sb, __func__, "Reserved blocks too high (%u)", input->reserved_blocks); else if (free_blocks_count < 0) ext4_warning(sb, __func__, "Bad blocks count %u", input->blocks_count); else if (!(bh = sb_bread(sb, end - 1))) ext4_warning(sb, __func__, "Cannot read last block (%llu)", end - 1); else if (outside(input->block_bitmap, start, end)) ext4_warning(sb, __func__, "Block bitmap not in group (block %llu)", (unsigned long long)input->block_bitmap); else if (outside(input->inode_bitmap, start, end)) ext4_warning(sb, __func__, "Inode bitmap not in group (block %llu)", (unsigned long long)input->inode_bitmap); else if (outside(input->inode_table, start, end) || outside(itend - 1, start, end)) ext4_warning(sb, __func__, "Inode table not in group (blocks %llu-%llu)", (unsigned long long)input->inode_table, itend - 1); else if (input->inode_bitmap == input->block_bitmap) ext4_warning(sb, __func__, "Block bitmap same as inode bitmap (%llu)", (unsigned long long)input->block_bitmap); else if (inside(input->block_bitmap, input->inode_table, itend)) ext4_warning(sb, __func__, "Block bitmap (%llu) in inode table (%llu-%llu)", (unsigned long long)input->block_bitmap, (unsigned long long)input->inode_table, itend - 1); else if (inside(input->inode_bitmap, input->inode_table, itend)) ext4_warning(sb, __func__, "Inode bitmap (%llu) in inode table (%llu-%llu)", (unsigned long long)input->inode_bitmap, (unsigned long long)input->inode_table, itend - 1); else if (inside(input->block_bitmap, start, metaend)) ext4_warning(sb, __func__, "Block bitmap 
(%llu) in GDT table" " (%llu-%llu)", (unsigned long long)input->block_bitmap, start, metaend - 1); else if (inside(input->inode_bitmap, start, metaend)) ext4_warning(sb, __func__, "Inode bitmap (%llu) in GDT table" " (%llu-%llu)", (unsigned long long)input->inode_bitmap, start, metaend - 1); else if (inside(input->inode_table, start, metaend) || inside(itend - 1, start, metaend)) ext4_warning(sb, __func__, "Inode table (%llu-%llu) overlaps" "GDT table (%llu-%llu)", (unsigned long long)input->inode_table, itend - 1, start, metaend - 1); else err = 0; brelse(bh); return err; } static struct buffer_head *bclean(handle_t *handle, struct super_block *sb, ext4_fsblk_t blk) { struct buffer_head *bh; int err; bh = sb_getblk(sb, blk); if (!bh) return ERR_PTR(-EIO); if ((err = ext4_journal_get_write_access(handle, bh))) { brelse(bh); bh = ERR_PTR(err); } else { lock_buffer(bh); memset(bh->b_data, 0, sb->s_blocksize); set_buffer_uptodate(bh); unlock_buffer(bh); } return bh; } /* * If we have fewer than thresh credits, extend by EXT4_MAX_TRANS_DATA. * If that fails, restart the transaction & regain write access for the * buffer head which is used for block_bitmap modifications. */ static int extend_or_restart_transaction(handle_t *handle, int thresh, struct buffer_head *bh) { int err; if (ext4_handle_has_enough_credits(handle, thresh)) return 0; err = ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA); if (err < 0) return err; if (err) { if ((err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA))) return err; if ((err = ext4_journal_get_write_access(handle, bh))) return err; } return 0; } /* * Set up the block and inode bitmaps, and the inode table for the new group. * This doesn't need to be part of the main transaction, since we are only * changing blocks outside the actual filesystem. We still do journaling to * ensure the recovery is correct in case of a failure just after resize. * If any part of this fails, we simply abort the resize. 
*/ static int setup_new_group_blocks(struct super_block *sb, struct ext4_new_group_data *input) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t start = ext4_group_first_block_no(sb, input->group); int reserved_gdb = ext4_bg_has_super(sb, input->group) ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0; unsigned long gdblocks = ext4_bg_num_gdb(sb, input->group); struct buffer_head *bh; handle_t *handle; ext4_fsblk_t block; ext4_grpblk_t bit; int i; int err = 0, err2; /* This transaction may be extended/restarted along the way */ handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA); if (IS_ERR(handle)) return PTR_ERR(handle); mutex_lock(&sbi->s_resize_lock); if (input->group != sbi->s_groups_count) { err = -EBUSY; goto exit_journal; } if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) { err = PTR_ERR(bh); goto exit_journal; } if (ext4_bg_has_super(sb, input->group)) { ext4_debug("mark backup superblock %#04llx (+0)\n", start); ext4_set_bit(0, bh->b_data); } /* Copy all of the GDT blocks into the backup in this group */ for (i = 0, bit = 1, block = start + 1; i < gdblocks; i++, block++, bit++) { struct buffer_head *gdb; ext4_debug("update backup group %#04llx (+%d)\n", block, bit); if ((err = extend_or_restart_transaction(handle, 1, bh))) goto exit_bh; gdb = sb_getblk(sb, block); if (!gdb) { err = -EIO; goto exit_bh; } if ((err = ext4_journal_get_write_access(handle, gdb))) { brelse(gdb); goto exit_bh; } lock_buffer(gdb); memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size); set_buffer_uptodate(gdb); unlock_buffer(gdb); ext4_handle_dirty_metadata(handle, NULL, gdb); ext4_set_bit(bit, bh->b_data); brelse(gdb); } /* Zero out all of the reserved backup group descriptor table blocks */ for (i = 0, bit = gdblocks + 1, block = start + bit; i < reserved_gdb; i++, block++, bit++) { struct buffer_head *gdb; ext4_debug("clear reserved block %#04llx (+%d)\n", block, bit); if ((err = extend_or_restart_transaction(handle, 1, bh))) goto exit_bh; if 
(IS_ERR(gdb = bclean(handle, sb, block))) { err = PTR_ERR(gdb); goto exit_bh; } ext4_handle_dirty_metadata(handle, NULL, gdb); ext4_set_bit(bit, bh->b_data); brelse(gdb); } ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap, input->block_bitmap - start); ext4_set_bit(input->block_bitmap - start, bh->b_data); ext4_debug("mark inode bitmap %#04llx (+%llu)\n", input->inode_bitmap, input->inode_bitmap - start); ext4_set_bit(input->inode_bitmap - start, bh->b_data); /* Zero out all of the inode table blocks */ for (i = 0, block = input->inode_table, bit = block - start; i < sbi->s_itb_per_group; i++, bit++, block++) { struct buffer_head *it; ext4_debug("clear inode block %#04llx (+%d)\n", block, bit); if ((err = extend_or_restart_transaction(handle, 1, bh))) goto exit_bh; if (IS_ERR(it = bclean(handle, sb, block))) { err = PTR_ERR(it); goto exit_bh; } ext4_handle_dirty_metadata(handle, NULL, it); brelse(it); ext4_set_bit(bit, bh->b_data); } if ((err = extend_or_restart_transaction(handle, 2, bh))) goto exit_bh; mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, bh->b_data); ext4_handle_dirty_metadata(handle, NULL, bh); brelse(bh); /* Mark unused entries in inode bitmap used */ ext4_debug("clear inode bitmap %#04llx (+%llu)\n", input->inode_bitmap, input->inode_bitmap - start); if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) { err = PTR_ERR(bh); goto exit_journal; } mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, bh->b_data); ext4_handle_dirty_metadata(handle, NULL, bh); exit_bh: brelse(bh); exit_journal: mutex_unlock(&sbi->s_resize_lock); if ((err2 = ext4_journal_stop(handle)) && !err) err = err2; return err; } /* * Iterate through the groups which hold BACKUP superblock/GDT copies in an * ext4 filesystem. The counters should be initialized to 1, 5, and 7 before * calling this for the first time. In a sparse filesystem it will be the * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ... 
* For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... */ static unsigned ext4_list_backups(struct super_block *sb, unsigned *three, unsigned *five, unsigned *seven) { unsigned *min = three; int mult = 3; unsigned ret; if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { ret = *min; *min += 1; return ret; } if (*five < *min) { min = five; mult = 5; } if (*seven < *min) { min = seven; mult = 7; } ret = *min; *min *= mult; return ret; } /* * Check that all of the backup GDT blocks are held in the primary GDT block. * It is assumed that they are stored in group order. Returns the number of * groups in current filesystem that have BACKUPS, or -ve error code. */ static int verify_reserved_gdb(struct super_block *sb, struct buffer_head *primary) { const ext4_fsblk_t blk = primary->b_blocknr; const ext4_group_t end = EXT4_SB(sb)->s_groups_count; unsigned three = 1; unsigned five = 5; unsigned seven = 7; unsigned grp; __le32 *p = (__le32 *)primary->b_data; int gdbackups = 0; while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) { if (le32_to_cpu(*p++) != grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){ ext4_warning(sb, __func__, "reserved GDT %llu" " missing grp %d (%llu)", blk, grp, grp * (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) + blk); return -EINVAL; } if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb)) return -EFBIG; } return gdbackups; } /* * Called when we need to bring a reserved group descriptor table block into * use from the resize inode. The primary copy of the new GDT block currently * is an indirect block (under the double indirect block in the resize inode). * The new backup GDT blocks will be stored as leaf blocks in this indirect * block, in group order. Even though we know all the block numbers we need, * we check to ensure that the resize inode has actually reserved these blocks. * * Don't need to update the block bitmaps because the blocks are still in use. 
* * We get all of the error cases out of the way, so that we are sure to not * fail once we start modifying the data on disk, because JBD has no rollback. */ static int add_new_gdb(handle_t *handle, struct inode *inode, struct ext4_new_group_data *input, struct buffer_head **primary) { struct super_block *sb = inode->i_sb; struct ext4_super_block *es = EXT4_SB(sb)->s_es; unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb); ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num; struct buffer_head **o_group_desc, **n_group_desc; struct buffer_head *dind; int gdbackups; struct ext4_iloc iloc; __le32 *data; int err; if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", gdb_num); /* * If we are not using the primary superblock/GDT copy don't resize, * because the user tools have no way of handling this. Probably a * bad time to do it anyways. */ if (EXT4_SB(sb)->s_sbh->b_blocknr != le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { ext4_warning(sb, __func__, "won't resize using backup superblock at %llu", (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); return -EPERM; } *primary = sb_bread(sb, gdblock); if (!*primary) return -EIO; if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) { err = gdbackups; goto exit_bh; } data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; dind = sb_bread(sb, le32_to_cpu(*data)); if (!dind) { err = -EIO; goto exit_bh; } data = (__le32 *)dind->b_data; if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) { ext4_warning(sb, __func__, "new group %u GDT block %llu not reserved", input->group, gdblock); err = -EINVAL; goto exit_dind; } if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh))) goto exit_dind; if ((err = ext4_journal_get_write_access(handle, *primary))) goto exit_sbh; if ((err = ext4_journal_get_write_access(handle, dind))) goto exit_primary; /* ext4_reserve_inode_write() gets a reference on the iloc */ if ((err = 
ext4_reserve_inode_write(handle, inode, &iloc))) goto exit_dindj; n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *), GFP_NOFS); if (!n_group_desc) { err = -ENOMEM; ext4_warning(sb, __func__, "not enough memory for %lu groups", gdb_num + 1); goto exit_inode; } /* * Finally, we have all of the possible failures behind us... * * Remove new GDT block from inode double-indirect block and clear out * the new GDT block for use (which also "frees" the backup GDT blocks * from the reserved inode). We don't need to change the bitmaps for * these blocks, because they are marked as in-use from being in the * reserved inode, and will become GDT blocks (primary and backup). */ data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; ext4_handle_dirty_metadata(handle, NULL, dind); brelse(dind); inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9; ext4_mark_iloc_dirty(handle, inode, &iloc); memset((*primary)->b_data, 0, sb->s_blocksize); ext4_handle_dirty_metadata(handle, NULL, *primary); o_group_desc = EXT4_SB(sb)->s_group_desc; memcpy(n_group_desc, o_group_desc, EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); n_group_desc[gdb_num] = *primary; EXT4_SB(sb)->s_group_desc = n_group_desc; EXT4_SB(sb)->s_gdb_count++; kfree(o_group_desc); le16_add_cpu(&es->s_reserved_gdt_blocks, -1); ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); return 0; exit_inode: /* ext4_journal_release_buffer(handle, iloc.bh); */ brelse(iloc.bh); exit_dindj: /* ext4_journal_release_buffer(handle, dind); */ exit_primary: /* ext4_journal_release_buffer(handle, *primary); */ exit_sbh: /* ext4_journal_release_buffer(handle, *primary); */ exit_dind: brelse(dind); exit_bh: brelse(*primary); ext4_debug("leaving with error %d\n", err); return err; } /* * Called when we are adding a new group which has a backup copy of each of * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 
* We need to add these reserved backup GDT blocks to the resize inode, so * that they are kept for future resizing and not allocated to files. * * Each reserved backup GDT block will go into a different indirect block. * The indirect blocks are actually the primary reserved GDT blocks, * so we know in advance what their block numbers are. We only get the * double-indirect block to verify it is pointing to the primary reserved * GDT blocks so we don't overwrite a data block by accident. The reserved * backup GDT blocks are stored in their reserved primary GDT block. */ static int reserve_backup_gdb(handle_t *handle, struct inode *inode, struct ext4_new_group_data *input) { struct super_block *sb = inode->i_sb; int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks); struct buffer_head **primary; struct buffer_head *dind; struct ext4_iloc iloc; ext4_fsblk_t blk; __le32 *data, *end; int gdbackups = 0; int res, i; int err; primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS); if (!primary) return -ENOMEM; data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK; dind = sb_bread(sb, le32_to_cpu(*data)); if (!dind) { err = -EIO; goto exit_free; } blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count; data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count % EXT4_ADDR_PER_BLOCK(sb)); end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb); /* Get each reserved primary GDT block and verify it holds backups */ for (res = 0; res < reserved_gdb; res++, blk++) { if (le32_to_cpu(*data) != blk) { ext4_warning(sb, __func__, "reserved block %llu" " not at offset %ld", blk, (long)(data - (__le32 *)dind->b_data)); err = -EINVAL; goto exit_bh; } primary[res] = sb_bread(sb, blk); if (!primary[res]) { err = -EIO; goto exit_bh; } if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) { brelse(primary[res]); err = gdbackups; goto exit_bh; } if (++data >= end) data = (__le32 *)dind->b_data; } for (i = 0; i < reserved_gdb; i++) { if ((err = 
ext4_journal_get_write_access(handle, primary[i]))) { /* int j; for (j = 0; j < i; j++) ext4_journal_release_buffer(handle, primary[j]); */ goto exit_bh; } } if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) goto exit_bh; /* * Finally we can add each of the reserved backup GDT blocks from * the new group to its reserved primary GDT block. */ blk = input->group * EXT4_BLOCKS_PER_GROUP(sb); for (i = 0; i < reserved_gdb; i++) { int err2; data = (__le32 *)primary[i]->b_data; /* printk("reserving backup %lu[%u] = %lu\n", primary[i]->b_blocknr, gdbackups, blk + primary[i]->b_blocknr); */ data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr); err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]); if (!err) err = err2; } inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9; ext4_mark_iloc_dirty(handle, inode, &iloc); exit_bh: while (--res >= 0) brelse(primary[res]); brelse(dind); exit_free: kfree(primary); return err; } /* * Update the backup copies of the ext4 metadata. These don't need to be part * of the main resize transaction, because e2fsck will re-write them if there * is a problem (basically only OOM will cause a problem). However, we * _should_ update the backups if possible, in case the primary gets trashed * for some reason and we need to run e2fsck from a backup superblock. The * important part is that the new block and inode counts are in the backup * superblocks, and the location of the new group metadata in the GDT backups. * * We do not need take the s_resize_lock for this, because these * blocks are not otherwise touched by the filesystem code when it is * mounted. We don't need to worry about last changing from * sbi->s_groups_count, because the worst that can happen is that we * do not copy the full number of backups at this time. The resize * which changed s_groups_count will backup again. 
*/ static void update_backups(struct super_block *sb, int blk_off, char *data, int size) { struct ext4_sb_info *sbi = EXT4_SB(sb); const ext4_group_t last = sbi->s_groups_count; const int bpg = EXT4_BLOCKS_PER_GROUP(sb); unsigned three = 1; unsigned five = 5; unsigned seven = 7; ext4_group_t group; int rest = sb->s_blocksize - size; handle_t *handle; int err = 0, err2; handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA); if (IS_ERR(handle)) { group = 1; err = PTR_ERR(handle); goto exit_err; } while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) { struct buffer_head *bh; /* Out of journal space, and can't get more - abort - so sad */ if (ext4_handle_valid(handle) && handle->h_buffer_credits == 0 && ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) && (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA))) break; bh = sb_getblk(sb, group * bpg + blk_off); if (!bh) { err = -EIO; break; } ext4_debug("update metadata backup %#04lx\n", (unsigned long)bh->b_blocknr); if ((err = ext4_journal_get_write_access(handle, bh))) break; lock_buffer(bh); memcpy(bh->b_data, data, size); if (rest) memset(bh->b_data + size, 0, rest); set_buffer_uptodate(bh); unlock_buffer(bh); ext4_handle_dirty_metadata(handle, NULL, bh); brelse(bh); } if ((err2 = ext4_journal_stop(handle)) && !err) err = err2; /* * Ugh! Need to have e2fsck write the backup copies. It is too * late to revert the resize, we shouldn't fail just because of * the backup copies (they are only needed in case of corruption). * * However, if we got here we have a journal problem too, so we * can't really start a transaction to mark the superblock. * Chicken out and just set the flag on the hope it will be written * to disk, and if not - we will simply wait until next fsck. 
*/ exit_err: if (err) { ext4_warning(sb, __func__, "can't update backup for group %u (err %d), " "forcing fsck on next reboot", group, err); sbi->s_mount_state &= ~EXT4_VALID_FS; sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS); mark_buffer_dirty(sbi->s_sbh); } } /* Add group descriptor data to an existing or new group descriptor block. * Ensure we handle all possible error conditions _before_ we start modifying * the filesystem, because we cannot abort the transaction and not have it * write the data to disk. * * If we are on a GDT block boundary, we need to get the reserved GDT block. * Otherwise, we may need to add backup GDT blocks for a sparse group. * * We only need to hold the superblock lock while we are actually adding * in the new group's counts to the superblock. Prior to that we have * not really "added" the group at all. We re-check that we are still * adding in the last group in case things have changed since verifying. */ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int reserved_gdb = ext4_bg_has_super(sb, input->group) ? 
le16_to_cpu(es->s_reserved_gdt_blocks) : 0; struct buffer_head *primary = NULL; struct ext4_group_desc *gdp; struct inode *inode = NULL; handle_t *handle; int gdb_off, gdb_num; int err, err2; gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb); gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) { ext4_warning(sb, __func__, "Can't resize non-sparse filesystem further"); return -EPERM; } if (ext4_blocks_count(es) + input->blocks_count < ext4_blocks_count(es)) { ext4_warning(sb, __func__, "blocks_count overflow"); return -EINVAL; } if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) < le32_to_cpu(es->s_inodes_count)) { ext4_warning(sb, __func__, "inodes_count overflow"); return -EINVAL; } if (reserved_gdb || gdb_off == 0) { if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_RESIZE_INODE) || !le16_to_cpu(es->s_reserved_gdt_blocks)) { ext4_warning(sb, __func__, "No reserved GDT blocks, can't resize"); return -EPERM; } inode = ext4_iget(sb, EXT4_RESIZE_INO); if (IS_ERR(inode)) { ext4_warning(sb, __func__, "Error opening resize inode"); return PTR_ERR(inode); } } if ((err = verify_group_input(sb, input))) goto exit_put; if ((err = setup_new_group_blocks(sb, input))) goto exit_put; /* * We will always be modifying at least the superblock and a GDT * block. If we are adding a group past the last current GDT block, * we will also modify the inode and the dindirect block. If we * are adding a group with superblock/GDT backups we will also * modify each of the reserved GDT dindirect blocks. */ handle = ext4_journal_start_sb(sb, ext4_bg_has_super(sb, input->group) ? 
3 + reserved_gdb : 4); if (IS_ERR(handle)) { err = PTR_ERR(handle); goto exit_put; } mutex_lock(&sbi->s_resize_lock); if (input->group != sbi->s_groups_count) { ext4_warning(sb, __func__, "multiple resizers run on filesystem!"); err = -EBUSY; goto exit_journal; } if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh))) goto exit_journal; /* * We will only either add reserved group blocks to a backup group * or remove reserved blocks for the first group in a new group block. * Doing both would be mean more complex code, and sane people don't * use non-sparse filesystems anymore. This is already checked above. */ if (gdb_off) { primary = sbi->s_group_desc[gdb_num]; if ((err = ext4_journal_get_write_access(handle, primary))) goto exit_journal; if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) && (err = reserve_backup_gdb(handle, inode, input))) goto exit_journal; } else if ((err = add_new_gdb(handle, inode, input, &primary))) goto exit_journal; /* * OK, now we've set up the new group. Time to make it active. * * We do not lock all allocations via s_resize_lock * so we have to be safe wrt. concurrent accesses the group * data. So we need to be careful to set all of the relevant * group descriptor data etc. *before* we enable the group. * * The key field here is sbi->s_groups_count: as long as * that retains its old value, nobody is going to access the new * group. * * So first we update all the descriptor metadata for the new * group; then we update the total disk blocks count; then we * update the groups count to enable the group; then finally we * update the free space counts so that the system can start * using the new disk blocks. 
*/ /* Update group descriptor block for new group */ gdp = (struct ext4_group_desc *)((char *)primary->b_data + gdb_off * EXT4_DESC_SIZE(sb)); memset(gdp, 0, EXT4_DESC_SIZE(sb)); ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */ ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */ ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */ ext4_free_blks_set(sb, gdp, input->free_blocks_count); ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED); gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp); /* * We can allocate memory for mb_alloc based on the new group * descriptor */ err = ext4_mb_add_groupinfo(sb, input->group, gdp); if (err) goto exit_journal; /* * Make the new blocks and inodes valid next. We do this before * increasing the group count so that once the group is enabled, * all of its blocks and inodes are already valid. * * We always allocate group-by-group, then block-by-block or * inode-by-inode within a group, so enabling these * blocks/inodes before the group is live won't actually let us * allocate the new space yet. */ ext4_blocks_count_set(es, ext4_blocks_count(es) + input->blocks_count); le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb)); /* * We need to protect s_groups_count against other CPUs seeing * inconsistent state in the superblock. * * The precise rules we use are: * * * Writers of s_groups_count *must* hold s_resize_lock * AND * * Writers must perform a smp_wmb() after updating all dependent * data and before modifying the groups count * * * Readers must hold s_resize_lock over the access * OR * * Readers must perform an smp_rmb() after reading the groups count * and before reading any dependent data. * * NB. 
These rules can be relaxed when checking the group count * while freeing data, as we can only allocate from a block * group after serialising against the group count, and we can * only then free after serialising in turn against that * allocation. */ smp_wmb(); /* Update the global fs size fields */ sbi->s_groups_count++; ext4_handle_dirty_metadata(handle, NULL, primary); /* Update the reserved block counts only once the new group is * active. */ ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) + input->reserved_blocks); /* Update the free space counts */ percpu_counter_add(&sbi->s_freeblocks_counter, input->free_blocks_count); percpu_counter_add(&sbi->s_freeinodes_counter, EXT4_INODES_PER_GROUP(sb)); if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { ext4_group_t flex_group; flex_group = ext4_flex_group(sbi, input->group); atomic_add(input->free_blocks_count, &sbi->s_flex_groups[flex_group].free_blocks); atomic_add(EXT4_INODES_PER_GROUP(sb), &sbi->s_flex_groups[flex_group].free_inodes); } ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); sb->s_dirt = 1; exit_journal: mutex_unlock(&sbi->s_resize_lock); if ((err2 = ext4_journal_stop(handle)) && !err) err = err2; if (!err) { update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es, sizeof(struct ext4_super_block)); update_backups(sb, primary->b_blocknr, primary->b_data, primary->b_size); } exit_put: iput(inode); return err; } /* ext4_group_add */ /* * Extend the filesystem to the new number of blocks specified. This entry * point is only used to extend the current filesystem to the end of the last * existing group. It can be accessed via ioctl, or by "remount,resize=<size>" * for emergencies (because it has no dependencies on reserved blocks). * * If we _really_ wanted, we could use default values to call ext4_group_add() * allow the "remount" trick to work for arbitrary resizing, assuming enough * GDT blocks are reserved to grow to the desired size. 
*/ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es, ext4_fsblk_t n_blocks_count) { ext4_fsblk_t o_blocks_count; ext4_group_t o_groups_count; ext4_grpblk_t last; ext4_grpblk_t add; struct buffer_head *bh; handle_t *handle; int err; ext4_group_t group; /* We don't need to worry about locking wrt other resizers just * yet: we're going to revalidate es->s_blocks_count after * taking the s_resize_lock below. */ o_blocks_count = ext4_blocks_count(es); o_groups_count = EXT4_SB(sb)->s_groups_count; if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: extending last group from %llu uto %llu blocks\n", o_blocks_count, n_blocks_count); if (n_blocks_count == 0 || n_blocks_count == o_blocks_count) return 0; if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) { printk(KERN_ERR "EXT4-fs: filesystem on %s:" " too large to resize to %llu blocks safely\n", sb->s_id, n_blocks_count); if (sizeof(sector_t) < 8) ext4_warning(sb, __func__, "CONFIG_LBDAF not enabled"); return -EINVAL; } if (n_blocks_count < o_blocks_count) { ext4_warning(sb, __func__, "can't shrink FS - resize aborted"); return -EBUSY; } /* Handle the remaining blocks in the last group only. 
*/ ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); if (last == 0) { ext4_warning(sb, __func__, "need to use ext2online to resize further"); return -EPERM; } add = EXT4_BLOCKS_PER_GROUP(sb) - last; if (o_blocks_count + add < o_blocks_count) { ext4_warning(sb, __func__, "blocks_count overflow"); return -EINVAL; } if (o_blocks_count + add > n_blocks_count) add = n_blocks_count - o_blocks_count; if (o_blocks_count + add < n_blocks_count) ext4_warning(sb, __func__, "will only finish group (%llu" " blocks, %u new)", o_blocks_count + add, add); /* See if the device is actually as big as what was requested */ bh = sb_bread(sb, o_blocks_count + add - 1); if (!bh) { ext4_warning(sb, __func__, "can't read last block, resize aborted"); return -ENOSPC; } brelse(bh); /* We will update the superblock, one block bitmap, and * one group descriptor via ext4_free_blocks(). */ handle = ext4_journal_start_sb(sb, 3); if (IS_ERR(handle)) { err = PTR_ERR(handle); ext4_warning(sb, __func__, "error %d on journal start", err); goto exit_put; } mutex_lock(&EXT4_SB(sb)->s_resize_lock); if (o_blocks_count != ext4_blocks_count(es)) { ext4_warning(sb, __func__, "multiple resizers run on filesystem!"); mutex_unlock(&EXT4_SB(sb)->s_resize_lock); ext4_journal_stop(handle); err = -EBUSY; goto exit_put; } if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh))) { ext4_warning(sb, __func__, "error %d on journal write access", err); mutex_unlock(&EXT4_SB(sb)->s_resize_lock); ext4_journal_stop(handle); goto exit_put; } ext4_blocks_count_set(es, o_blocks_count + add); ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); sb->s_dirt = 1; mutex_unlock(&EXT4_SB(sb)->s_resize_lock); ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count, o_blocks_count + add); /* We add the blocks to the bitmap and set the group need init bit */ ext4_add_groupblocks(handle, sb, o_blocks_count, add); ext4_debug("freed blocks %llu through %llu\n", o_blocks_count, o_blocks_count + 
add); if ((err = ext4_journal_stop(handle))) goto exit_put; if (test_opt(sb, DEBUG)) printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n", ext4_blocks_count(es)); update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es, sizeof(struct ext4_super_block)); exit_put: return err; } /* ext4_group_extend */
gpl-2.0
G5Devs/android_kernel_lge_msm8996
drivers/regulator/ab8500-ext.c
853
12260
/* * Copyright (C) ST-Ericsson SA 2010 * * License Terms: GNU General Public License v2 * * Authors: Bengt Jonsson <bengt.g.jonsson@stericsson.com> * * This file is based on drivers/regulator/ab8500.c * * AB8500 external regulators * * ab8500-ext supports the following regulators: * - VextSupply3 */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/regulator/ab8500.h> /** * struct ab8500_ext_regulator_info - ab8500 regulator information * @dev: device pointer * @desc: regulator description * @rdev: regulator device * @cfg: regulator configuration (extension of regulator FW configuration) * @update_bank: bank to control on/off * @update_reg: register to control on/off * @update_mask: mask to enable/disable and set mode of regulator * @update_val: bits holding the regulator current mode * @update_val_hp: bits to set EN pin active (LPn pin deactive) * normally this means high power mode * @update_val_lp: bits to set EN pin active and LPn pin active * normally this means low power mode * @update_val_hw: bits to set regulator pins in HW control * SysClkReq pins and logic will choose mode */ struct ab8500_ext_regulator_info { struct device *dev; struct regulator_desc desc; struct regulator_dev *rdev; struct ab8500_ext_regulator_cfg *cfg; u8 update_bank; u8 update_reg; u8 update_mask; u8 update_val; u8 update_val_hp; u8 update_val_lp; u8 update_val_hw; }; static int ab8500_ext_regulator_enable(struct regulator_dev *rdev) { int ret; struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); u8 regval; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } /* * To satisfy both HW high power request and SW request, the 
regulator * must be on in high power. */ if (info->cfg && info->cfg->hwreq) regval = info->update_val_hp; else regval = info->update_val; ret = abx500_mask_and_set_register_interruptible(info->dev, info->update_bank, info->update_reg, info->update_mask, regval); if (ret < 0) { dev_err(rdev_get_dev(info->rdev), "couldn't set enable bits for regulator\n"); return ret; } dev_dbg(rdev_get_dev(rdev), "%s-enable (bank, reg, mask, value): 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", info->desc.name, info->update_bank, info->update_reg, info->update_mask, regval); return 0; } static int ab8500_ext_regulator_disable(struct regulator_dev *rdev) { int ret; struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); u8 regval; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } /* * Set the regulator in HW request mode if configured */ if (info->cfg && info->cfg->hwreq) regval = info->update_val_hw; else regval = 0; ret = abx500_mask_and_set_register_interruptible(info->dev, info->update_bank, info->update_reg, info->update_mask, regval); if (ret < 0) { dev_err(rdev_get_dev(info->rdev), "couldn't set disable bits for regulator\n"); return ret; } dev_dbg(rdev_get_dev(rdev), "%s-disable (bank, reg, mask, value):" " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", info->desc.name, info->update_bank, info->update_reg, info->update_mask, regval); return 0; } static int ab8500_ext_regulator_is_enabled(struct regulator_dev *rdev) { int ret; struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); u8 regval; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } ret = abx500_get_register_interruptible(info->dev, info->update_bank, info->update_reg, &regval); if (ret < 0) { dev_err(rdev_get_dev(rdev), "couldn't read 0x%x register\n", info->update_reg); return ret; } dev_dbg(rdev_get_dev(rdev), "%s-is_enabled (bank, reg, mask, value):" " 0x%02x, 0x%02x, 0x%02x, 0x%02x\n", info->desc.name, 
info->update_bank, info->update_reg, info->update_mask, regval); if (((regval & info->update_mask) == info->update_val_lp) || ((regval & info->update_mask) == info->update_val_hp)) return 1; else return 0; } static int ab8500_ext_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode) { int ret = 0; struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); u8 regval; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } switch (mode) { case REGULATOR_MODE_NORMAL: regval = info->update_val_hp; break; case REGULATOR_MODE_IDLE: regval = info->update_val_lp; break; default: return -EINVAL; } /* If regulator is enabled and info->cfg->hwreq is set, the regulator must be on in high power, so we don't need to write the register with the same value. */ if (ab8500_ext_regulator_is_enabled(rdev) && !(info->cfg && info->cfg->hwreq)) { ret = abx500_mask_and_set_register_interruptible(info->dev, info->update_bank, info->update_reg, info->update_mask, regval); if (ret < 0) { dev_err(rdev_get_dev(rdev), "Could not set regulator mode.\n"); return ret; } dev_dbg(rdev_get_dev(rdev), "%s-set_mode (bank, reg, mask, value): " "0x%x, 0x%x, 0x%x, 0x%x\n", info->desc.name, info->update_bank, info->update_reg, info->update_mask, regval); } info->update_val = regval; return 0; } static unsigned int ab8500_ext_regulator_get_mode(struct regulator_dev *rdev) { struct ab8500_ext_regulator_info *info = rdev_get_drvdata(rdev); int ret; if (info == NULL) { dev_err(rdev_get_dev(rdev), "regulator info null pointer\n"); return -EINVAL; } if (info->update_val == info->update_val_hp) ret = REGULATOR_MODE_NORMAL; else if (info->update_val == info->update_val_lp) ret = REGULATOR_MODE_IDLE; else ret = -EINVAL; return ret; } static int ab8500_ext_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { struct regulation_constraints *regu_constraints = rdev->constraints; if (!regu_constraints) { 
dev_err(rdev_get_dev(rdev), "No regulator constraints\n"); return -EINVAL; } if (regu_constraints->min_uV == min_uV && regu_constraints->max_uV == max_uV) return 0; dev_err(rdev_get_dev(rdev), "Requested min %duV max %duV != constrained min %duV max %duV\n", min_uV, max_uV, regu_constraints->min_uV, regu_constraints->max_uV); return -EINVAL; } static int ab8500_ext_list_voltage(struct regulator_dev *rdev, unsigned selector) { struct regulation_constraints *regu_constraints = rdev->constraints; if (regu_constraints == NULL) { dev_err(rdev_get_dev(rdev), "regulator constraints null pointer\n"); return -EINVAL; } /* return the uV for the fixed regulators */ if (regu_constraints->min_uV && regu_constraints->max_uV) { if (regu_constraints->min_uV == regu_constraints->max_uV) return regu_constraints->min_uV; } return -EINVAL; } static struct regulator_ops ab8500_ext_regulator_ops = { .enable = ab8500_ext_regulator_enable, .disable = ab8500_ext_regulator_disable, .is_enabled = ab8500_ext_regulator_is_enabled, .set_mode = ab8500_ext_regulator_set_mode, .get_mode = ab8500_ext_regulator_get_mode, .set_voltage = ab8500_ext_set_voltage, .list_voltage = ab8500_ext_list_voltage, }; static struct ab8500_ext_regulator_info ab8500_ext_regulator_info[AB8500_NUM_EXT_REGULATORS] = { [AB8500_EXT_SUPPLY1] = { .desc = { .name = "VEXTSUPPLY1", .ops = &ab8500_ext_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_EXT_SUPPLY1, .owner = THIS_MODULE, .n_voltages = 1, }, .update_bank = 0x04, .update_reg = 0x08, .update_mask = 0x03, .update_val = 0x01, .update_val_hp = 0x01, .update_val_lp = 0x03, .update_val_hw = 0x02, }, [AB8500_EXT_SUPPLY2] = { .desc = { .name = "VEXTSUPPLY2", .ops = &ab8500_ext_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_EXT_SUPPLY2, .owner = THIS_MODULE, .n_voltages = 1, }, .update_bank = 0x04, .update_reg = 0x08, .update_mask = 0x0c, .update_val = 0x04, .update_val_hp = 0x04, .update_val_lp = 0x0c, .update_val_hw = 0x08, }, [AB8500_EXT_SUPPLY3] = { .desc = 
{ .name = "VEXTSUPPLY3", .ops = &ab8500_ext_regulator_ops, .type = REGULATOR_VOLTAGE, .id = AB8500_EXT_SUPPLY3, .owner = THIS_MODULE, .n_voltages = 1, }, .update_bank = 0x04, .update_reg = 0x08, .update_mask = 0x30, .update_val = 0x10, .update_val_hp = 0x10, .update_val_lp = 0x30, .update_val_hw = 0x20, }, }; static struct of_regulator_match ab8500_ext_regulator_match[] = { { .name = "ab8500_ext1", .driver_data = (void *) AB8500_EXT_SUPPLY1, }, { .name = "ab8500_ext2", .driver_data = (void *) AB8500_EXT_SUPPLY2, }, { .name = "ab8500_ext3", .driver_data = (void *) AB8500_EXT_SUPPLY3, }, }; static int ab8500_ext_regulator_probe(struct platform_device *pdev) { struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); struct ab8500_platform_data *ppdata; struct ab8500_regulator_platform_data *pdata; struct device_node *np = pdev->dev.of_node; struct regulator_config config = { }; int i, err; if (np) { err = of_regulator_match(&pdev->dev, np, ab8500_ext_regulator_match, ARRAY_SIZE(ab8500_ext_regulator_match)); if (err < 0) { dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", err); return err; } } if (!ab8500) { dev_err(&pdev->dev, "null mfd parent\n"); return -EINVAL; } ppdata = dev_get_platdata(ab8500->dev); if (!ppdata) { dev_err(&pdev->dev, "null parent pdata\n"); return -EINVAL; } pdata = ppdata->regulator; if (!pdata) { dev_err(&pdev->dev, "null pdata\n"); return -EINVAL; } /* make sure the platform data has the correct size */ if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) { dev_err(&pdev->dev, "Configuration error: size mismatch.\n"); return -EINVAL; } /* check for AB8500 2.x */ if (is_ab8500_2p0_or_earlier(ab8500)) { struct ab8500_ext_regulator_info *info; /* VextSupply3LPn is inverted on AB8500 2.x */ info = &ab8500_ext_regulator_info[AB8500_EXT_SUPPLY3]; info->update_val = 0x30; info->update_val_hp = 0x30; info->update_val_lp = 0x10; } /* register all regulators */ for (i = 0; i < ARRAY_SIZE(ab8500_ext_regulator_info); i++) 
{ struct ab8500_ext_regulator_info *info = NULL; /* assign per-regulator data */ info = &ab8500_ext_regulator_info[i]; info->dev = &pdev->dev; info->cfg = (struct ab8500_ext_regulator_cfg *) pdata->ext_regulator[i].driver_data; config.dev = &pdev->dev; config.driver_data = info; config.of_node = ab8500_ext_regulator_match[i].of_node; config.init_data = (np) ? ab8500_ext_regulator_match[i].init_data : &pdata->ext_regulator[i]; /* register regulator with framework */ info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config); if (IS_ERR(info->rdev)) { err = PTR_ERR(info->rdev); dev_err(&pdev->dev, "failed to register regulator %s\n", info->desc.name); return err; } dev_dbg(rdev_get_dev(info->rdev), "%s-probed\n", info->desc.name); } return 0; } static struct platform_driver ab8500_ext_regulator_driver = { .probe = ab8500_ext_regulator_probe, .driver = { .name = "ab8500-ext-regulator", .owner = THIS_MODULE, }, }; static int __init ab8500_ext_regulator_init(void) { int ret; ret = platform_driver_register(&ab8500_ext_regulator_driver); if (ret) pr_err("Failed to register ab8500 ext regulator: %d\n", ret); return ret; } subsys_initcall(ab8500_ext_regulator_init); static void __exit ab8500_ext_regulator_exit(void) { platform_driver_unregister(&ab8500_ext_regulator_driver); } module_exit(ab8500_ext_regulator_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Bengt Jonsson <bengt.g.jonsson@stericsson.com>"); MODULE_DESCRIPTION("AB8500 external regulator driver"); MODULE_ALIAS("platform:ab8500-ext-regulator");
gpl-2.0
eoghan2t9/HTC-Wildfire-S-Kernel
drivers/net/irda/ksdazzle-sir.c
1365
23437
/***************************************************************************** * * Filename: ksdazzle.c * Version: 0.1.2 * Description: Irda KingSun Dazzle USB Dongle * Status: Experimental * Author: Alex Villacís Lasso <a_villacis@palosanto.com> * * Based on stir4200, mcs7780, kingsun-sir drivers. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ /* * Following is my most current (2007-07-26) understanding of how the Kingsun * 07D0:4100 dongle (sometimes known as the MA-660) is supposed to work. This * information was deduced by examining the USB traffic captured with USBSnoopy * from the WinXP driver. Feel free to update here as more of the dongle is * known. * * General: This dongle exposes one interface with two interrupt endpoints, one * IN and one OUT. In this regard, it is similar to what the Kingsun/Donshine * dongle (07c0:4200) exposes. Traffic is raw and needs to be wrapped and * unwrapped manually as in stir4200, kingsun-sir, and ks959-sir. * * Transmission: To transmit an IrDA frame, it is necessary to wrap it, then * split it into multiple segments of up to 7 bytes each, and transmit each in * sequence. It seems that sending a single big block (like kingsun-sir does) * won't work with this dongle. 
Each segment needs to be prefixed with a value * equal to (unsigned char)0xF8 + <number of bytes in segment>, inside a payload * of exactly 8 bytes. For example, a segment of 1 byte gets prefixed by 0xF9, * and one of 7 bytes gets prefixed by 0xFF. The bytes at the end of the * payload, not considered by the prefix, are ignored (set to 0 by this * implementation). * * Reception: To receive data, the driver must poll the dongle regularly (like * kingsun-sir.c) with interrupt URBs. If data is available, it will be returned * in payloads from 0 to 8 bytes long. When concatenated, these payloads form * a raw IrDA stream that needs to be unwrapped as in stir4200 and kingsun-sir * * Speed change: To change the speed of the dongle, the driver prepares a * control URB with the following as a setup packet: * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE * bRequest 0x09 * wValue 0x0200 * wIndex 0x0001 * wLength 0x0008 (length of the payload) * The payload is a 8-byte record, apparently identical to the one used in * drivers/usb/serial/cypress_m8.c to change speed: * __u32 baudSpeed; * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits * unsigned int : 1; * unsigned int stopBits : 1; * unsigned int parityEnable : 1; * unsigned int parityType : 1; * unsigned int : 1; * unsigned int reset : 1; * unsigned char reserved[3]; // set to 0 * * For now only SIR speeds have been observed with this dongle. Therefore, * nothing is known on what changes (if any) must be done to frame wrapping / * unwrapping for higher than SIR speeds. This driver assumes no change is * necessary and announces support for all the way to 115200 bps. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/device.h> #include <linux/crc32.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include <net/irda/irda.h> #include <net/irda/wrapper.h> #include <net/irda/crc.h> #define KSDAZZLE_VENDOR_ID 0x07d0 #define KSDAZZLE_PRODUCT_ID 0x4100 /* These are the currently known USB ids */ static struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ {USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)}, {} }; MODULE_DEVICE_TABLE(usb, dongles); #define KINGSUN_MTT 0x07 #define KINGSUN_REQ_RECV 0x01 #define KINGSUN_REQ_SEND 0x09 #define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */ #define KINGSUN_RCV_MAX 2048 /* Max transfer we can receive */ struct ksdazzle_speedparams { __le32 baudrate; /* baud rate, little endian */ __u8 flags; __u8 reserved[3]; } __attribute__ ((packed)); #define KS_DATA_5_BITS 0x00 #define KS_DATA_6_BITS 0x01 #define KS_DATA_7_BITS 0x02 #define KS_DATA_8_BITS 0x03 #define KS_STOP_BITS_1 0x00 #define KS_STOP_BITS_2 0x08 #define KS_PAR_DISABLE 0x00 #define KS_PAR_EVEN 0x10 #define KS_PAR_ODD 0x30 #define KS_RESET 0x80 #define KINGSUN_EP_IN 0 #define KINGSUN_EP_OUT 1 struct ksdazzle_cb { struct usb_device *usbdev; /* init: probe_irda */ struct net_device *netdev; /* network layer */ struct irlap_cb *irlap; /* The link layer we are binded to */ struct qos_info qos; struct urb *tx_urb; __u8 *tx_buf_clear; unsigned int tx_buf_clear_used; unsigned int tx_buf_clear_sent; __u8 tx_payload[8]; struct urb *rx_urb; __u8 *rx_buf; iobuff_t rx_unwrap_buff; struct usb_ctrlrequest *speed_setuprequest; struct urb *speed_urb; struct ksdazzle_speedparams speedparams; unsigned int new_speed; __u8 ep_in; __u8 ep_out; spinlock_t lock; int receiving; }; /* Callback transmission routine */ static void 
ksdazzle_speed_irq(struct urb *urb) { /* unlink, shutdown, unplug, other nasties */ if (urb->status != 0) { err("ksdazzle_speed_irq: urb asynchronously failed - %d", urb->status); } } /* Send a control request to change speed of the dongle */ static int ksdazzle_change_speed(struct ksdazzle_cb *kingsun, unsigned speed) { static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400, 57600, 115200, 576000, 1152000, 4000000, 0 }; int err; unsigned int i; if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL) return -ENOMEM; /* Check that requested speed is among the supported ones */ for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ; if (supported_speeds[i] == 0) return -EOPNOTSUPP; memset(&(kingsun->speedparams), 0, sizeof(struct ksdazzle_speedparams)); kingsun->speedparams.baudrate = cpu_to_le32(speed); kingsun->speedparams.flags = KS_DATA_8_BITS; /* speed_setuprequest pre-filled in ksdazzle_probe */ usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev, usb_sndctrlpipe(kingsun->usbdev, 0), (unsigned char *)kingsun->speed_setuprequest, &(kingsun->speedparams), sizeof(struct ksdazzle_speedparams), ksdazzle_speed_irq, kingsun); kingsun->speed_urb->status = 0; err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC); return err; } /* Submit one fragment of an IrDA frame to the dongle */ static void ksdazzle_send_irq(struct urb *urb); static int ksdazzle_submit_tx_fragment(struct ksdazzle_cb *kingsun) { unsigned int wraplen; int ret; /* We can send at most 7 bytes of payload at a time */ wraplen = 7; if (wraplen > kingsun->tx_buf_clear_used) wraplen = kingsun->tx_buf_clear_used; /* Prepare payload prefix with used length */ memset(kingsun->tx_payload, 0, 8); kingsun->tx_payload[0] = (unsigned char)0xf8 + wraplen; memcpy(kingsun->tx_payload + 1, kingsun->tx_buf_clear, wraplen); usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev, usb_sndintpipe(kingsun->usbdev, kingsun->ep_out), kingsun->tx_payload, 8, ksdazzle_send_irq, kingsun, 
1); kingsun->tx_urb->status = 0; ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC); /* Remember how much data was sent, in order to update at callback */ kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0; return ret; } /* Callback transmission routine */ static void ksdazzle_send_irq(struct urb *urb) { struct ksdazzle_cb *kingsun = urb->context; struct net_device *netdev = kingsun->netdev; int ret = 0; /* in process of stopping, just drop data */ if (!netif_running(kingsun->netdev)) { err("ksdazzle_send_irq: Network not running!"); return; } /* unlink, shutdown, unplug, other nasties */ if (urb->status != 0) { err("ksdazzle_send_irq: urb asynchronously failed - %d", urb->status); return; } if (kingsun->tx_buf_clear_used > 0) { /* Update data remaining to be sent */ if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) { memmove(kingsun->tx_buf_clear, kingsun->tx_buf_clear + kingsun->tx_buf_clear_sent, kingsun->tx_buf_clear_used - kingsun->tx_buf_clear_sent); } kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent; kingsun->tx_buf_clear_sent = 0; if (kingsun->tx_buf_clear_used > 0) { /* There is more data to be sent */ if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) { err("ksdazzle_send_irq: failed tx_urb submit: %d", ret); switch (ret) { case -ENODEV: case -EPIPE: break; default: netdev->stats.tx_errors++; netif_start_queue(netdev); } } } else { /* All data sent, send next speed && wake network queue */ if (kingsun->new_speed != -1 && cpu_to_le32(kingsun->new_speed) != kingsun->speedparams.baudrate) ksdazzle_change_speed(kingsun, kingsun->new_speed); netif_wake_queue(netdev); } } } /* * Called from net/core when new frame is available. 
*/ static netdev_tx_t ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ksdazzle_cb *kingsun; unsigned int wraplen; int ret = 0; netif_stop_queue(netdev); /* the IRDA wrapping routines don't deal with non linear skb */ SKB_LINEAR_ASSERT(skb); kingsun = netdev_priv(netdev); spin_lock(&kingsun->lock); kingsun->new_speed = irda_get_next_speed(skb); /* Append data to the end of whatever data remains to be transmitted */ wraplen = async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE); kingsun->tx_buf_clear_used = wraplen; if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) { err("ksdazzle_hard_xmit: failed tx_urb submit: %d", ret); switch (ret) { case -ENODEV: case -EPIPE: break; default: netdev->stats.tx_errors++; netif_start_queue(netdev); } } else { netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; } dev_kfree_skb(skb); spin_unlock(&kingsun->lock); return NETDEV_TX_OK; } /* Receive callback function */ static void ksdazzle_rcv_irq(struct urb *urb) { struct ksdazzle_cb *kingsun = urb->context; struct net_device *netdev = kingsun->netdev; /* in process of stopping, just drop data */ if (!netif_running(netdev)) { kingsun->receiving = 0; return; } /* unlink, shutdown, unplug, other nasties */ if (urb->status != 0) { err("ksdazzle_rcv_irq: urb asynchronously failed - %d", urb->status); kingsun->receiving = 0; return; } if (urb->actual_length > 0) { __u8 *bytes = urb->transfer_buffer; unsigned int i; for (i = 0; i < urb->actual_length; i++) { async_unwrap_char(netdev, &netdev->stats, &kingsun->rx_unwrap_buff, bytes[i]); } kingsun->receiving = (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; } /* This urb has already been filled in ksdazzle_net_open. It is assumed that urb keeps the pointer to the payload buffer. */ urb->status = 0; usb_submit_urb(urb, GFP_ATOMIC); } /* * Function ksdazzle_net_open (dev) * * Network device is taken up. 
Usually this is done by "ifconfig irda0 up" */ static int ksdazzle_net_open(struct net_device *netdev) { struct ksdazzle_cb *kingsun = netdev_priv(netdev); int err = -ENOMEM; char hwname[16]; /* At this point, urbs are NULL, and skb is NULL (see ksdazzle_probe) */ kingsun->receiving = 0; /* Initialize for SIR to copy data directly into skb. */ kingsun->rx_unwrap_buff.in_frame = FALSE; kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU; kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU); if (!kingsun->rx_unwrap_buff.skb) goto free_mem; skb_reserve(kingsun->rx_unwrap_buff.skb, 1); kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data; kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kingsun->rx_urb) goto free_mem; kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kingsun->tx_urb) goto free_mem; kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kingsun->speed_urb) goto free_mem; /* Initialize speed for dongle */ kingsun->new_speed = 9600; err = ksdazzle_change_speed(kingsun, 9600); if (err < 0) goto free_mem; /* * Now that everything should be initialized properly, * Open new IrLAP layer instance to take care of us... */ sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { err("ksdazzle-sir: irlap_open failed"); goto free_mem; } /* Start reception. 
*/ usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev, usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in), kingsun->rx_buf, KINGSUN_RCV_MAX, ksdazzle_rcv_irq, kingsun, 1); kingsun->rx_urb->status = 0; err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); if (err) { err("ksdazzle-sir: first urb-submit failed: %d", err); goto close_irlap; } netif_start_queue(netdev); /* Situation at this point: - all work buffers allocated - urbs allocated and ready to fill - max rx packet known (in max_rx) - unwrap state machine initialized, in state outside of any frame - receive request in progress - IrLAP layer started, about to hand over packets to send */ return 0; close_irlap: irlap_close(kingsun->irlap); free_mem: usb_free_urb(kingsun->speed_urb); kingsun->speed_urb = NULL; usb_free_urb(kingsun->tx_urb); kingsun->tx_urb = NULL; usb_free_urb(kingsun->rx_urb); kingsun->rx_urb = NULL; if (kingsun->rx_unwrap_buff.skb) { kfree_skb(kingsun->rx_unwrap_buff.skb); kingsun->rx_unwrap_buff.skb = NULL; kingsun->rx_unwrap_buff.head = NULL; } return err; } /* * Function ksdazzle_net_close (dev) * * Network device is taken down. 
Usually this is done by * "ifconfig irda0 down" */ static int ksdazzle_net_close(struct net_device *netdev) { struct ksdazzle_cb *kingsun = netdev_priv(netdev); /* Stop transmit processing */ netif_stop_queue(netdev); /* Mop up receive && transmit urb's */ usb_kill_urb(kingsun->tx_urb); usb_free_urb(kingsun->tx_urb); kingsun->tx_urb = NULL; usb_kill_urb(kingsun->speed_urb); usb_free_urb(kingsun->speed_urb); kingsun->speed_urb = NULL; usb_kill_urb(kingsun->rx_urb); usb_free_urb(kingsun->rx_urb); kingsun->rx_urb = NULL; kfree_skb(kingsun->rx_unwrap_buff.skb); kingsun->rx_unwrap_buff.skb = NULL; kingsun->rx_unwrap_buff.head = NULL; kingsun->rx_unwrap_buff.in_frame = FALSE; kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; kingsun->receiving = 0; /* Stop and remove instance of IrLAP */ irlap_close(kingsun->irlap); kingsun->irlap = NULL; return 0; } /* * IOCTLs : Extra out-of-band network commands... */ static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct if_irda_req *irq = (struct if_irda_req *)rq; struct ksdazzle_cb *kingsun = netdev_priv(netdev); int ret = 0; switch (cmd) { case SIOCSBANDWIDTH: /* Set bandwidth */ if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Check if the device is still there */ if (netif_device_present(kingsun->netdev)) return ksdazzle_change_speed(kingsun, irq->ifr_baudrate); break; case SIOCSMEDIABUSY: /* Set media busy */ if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Check if the IrDA stack is still there */ if (netif_running(kingsun->netdev)) irda_device_set_media_busy(kingsun->netdev, TRUE); break; case SIOCGRECEIVING: /* Only approximately true */ irq->ifr_receiving = kingsun->receiving; break; default: ret = -EOPNOTSUPP; } return ret; } static const struct net_device_ops ksdazzle_ops = { .ndo_start_xmit = ksdazzle_hard_xmit, .ndo_open = ksdazzle_net_open, .ndo_stop = ksdazzle_net_close, .ndo_do_ioctl = ksdazzle_net_ioctl, }; /* * This routine is called by the USB subsystem for each new device * in the 
system. We need to check if the device is ours, and in * this case start handling it. */ static int ksdazzle_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_host_interface *interface; struct usb_endpoint_descriptor *endpoint; struct usb_device *dev = interface_to_usbdev(intf); struct ksdazzle_cb *kingsun = NULL; struct net_device *net = NULL; int ret = -ENOMEM; int pipe, maxp_in, maxp_out; __u8 ep_in; __u8 ep_out; /* Check that there really are two interrupt endpoints. Check based on the one in drivers/usb/input/usbmouse.c */ interface = intf->cur_altsetting; if (interface->desc.bNumEndpoints != 2) { err("ksdazzle: expected 2 endpoints, found %d", interface->desc.bNumEndpoints); return -ENODEV; } endpoint = &interface->endpoint[KINGSUN_EP_IN].desc; if (!usb_endpoint_is_int_in(endpoint)) { err("ksdazzle: endpoint 0 is not interrupt IN"); return -ENODEV; } ep_in = endpoint->bEndpointAddress; pipe = usb_rcvintpipe(dev, ep_in); maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); if (maxp_in > 255 || maxp_in <= 1) { err("ksdazzle: endpoint 0 has max packet size %d not in range [2..255]", maxp_in); return -ENODEV; } endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc; if (!usb_endpoint_is_int_out(endpoint)) { err("ksdazzle: endpoint 1 is not interrupt OUT"); return -ENODEV; } ep_out = endpoint->bEndpointAddress; pipe = usb_sndintpipe(dev, ep_out); maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); /* Allocate network device container. 
*/ net = alloc_irdadev(sizeof(*kingsun)); if (!net) goto err_out1; SET_NETDEV_DEV(net, &intf->dev); kingsun = netdev_priv(net); kingsun->netdev = net; kingsun->usbdev = dev; kingsun->ep_in = ep_in; kingsun->ep_out = ep_out; kingsun->irlap = NULL; kingsun->tx_urb = NULL; kingsun->tx_buf_clear = NULL; kingsun->tx_buf_clear_used = 0; kingsun->tx_buf_clear_sent = 0; kingsun->rx_urb = NULL; kingsun->rx_buf = NULL; kingsun->rx_unwrap_buff.in_frame = FALSE; kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; kingsun->rx_unwrap_buff.skb = NULL; kingsun->receiving = 0; spin_lock_init(&kingsun->lock); kingsun->speed_setuprequest = NULL; kingsun->speed_urb = NULL; kingsun->speedparams.baudrate = 0; /* Allocate input buffer */ kingsun->rx_buf = kmalloc(KINGSUN_RCV_MAX, GFP_KERNEL); if (!kingsun->rx_buf) goto free_mem; /* Allocate output buffer */ kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL); if (!kingsun->tx_buf_clear) goto free_mem; /* Allocate and initialize speed setup packet */ kingsun->speed_setuprequest = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); if (!kingsun->speed_setuprequest) goto free_mem; kingsun->speed_setuprequest->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND; kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200); kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001); kingsun->speed_setuprequest->wLength = cpu_to_le16(sizeof(struct ksdazzle_speedparams)); printk(KERN_INFO "KingSun/Dazzle IRDA/USB found at address %d, " "Vendor: %x, Product: %x\n", dev->devnum, le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); /* Initialize QoS for this device */ irda_init_max_qos_capabilies(&kingsun->qos); /* Baud rates known to be supported. Please uncomment if devices (other than a SonyEriccson K300 phone) can be shown to support higher speeds with this dongle. 
*/ kingsun->qos.baud_rate.bits = IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200; kingsun->qos.min_turn_time.bits &= KINGSUN_MTT; irda_qos_bits_to_value(&kingsun->qos); /* Override the network functions we need to use */ net->netdev_ops = &ksdazzle_ops; ret = register_netdev(net); if (ret != 0) goto free_mem; dev_info(&net->dev, "IrDA: Registered KingSun/Dazzle device %s\n", net->name); usb_set_intfdata(intf, kingsun); /* Situation at this point: - all work buffers allocated - setup requests pre-filled - urbs not allocated, set to NULL - max rx packet known (is KINGSUN_FIFO_SIZE) - unwrap state machine (partially) initialized, but skb == NULL */ return 0; free_mem: kfree(kingsun->speed_setuprequest); kfree(kingsun->tx_buf_clear); kfree(kingsun->rx_buf); free_netdev(net); err_out1: return ret; } /* * The current device is removed, the USB layer tell us to shut it down... */ static void ksdazzle_disconnect(struct usb_interface *intf) { struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); if (!kingsun) return; unregister_netdev(kingsun->netdev); /* Mop up receive && transmit urb's */ usb_kill_urb(kingsun->speed_urb); usb_free_urb(kingsun->speed_urb); kingsun->speed_urb = NULL; usb_kill_urb(kingsun->tx_urb); usb_free_urb(kingsun->tx_urb); kingsun->tx_urb = NULL; usb_kill_urb(kingsun->rx_urb); usb_free_urb(kingsun->rx_urb); kingsun->rx_urb = NULL; kfree(kingsun->speed_setuprequest); kfree(kingsun->tx_buf_clear); kfree(kingsun->rx_buf); free_netdev(kingsun->netdev); usb_set_intfdata(intf, NULL); } #ifdef CONFIG_PM /* USB suspend, so power off the transmitter/receiver */ static int ksdazzle_suspend(struct usb_interface *intf, pm_message_t message) { struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); netif_device_detach(kingsun->netdev); if (kingsun->speed_urb != NULL) usb_kill_urb(kingsun->speed_urb); if (kingsun->tx_urb != NULL) usb_kill_urb(kingsun->tx_urb); if (kingsun->rx_urb != NULL) usb_kill_urb(kingsun->rx_urb); return 0; } /* Coming out of 
suspend, so reset hardware */ static int ksdazzle_resume(struct usb_interface *intf) { struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); if (kingsun->rx_urb != NULL) { /* Setup request already filled in ksdazzle_probe */ usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); } netif_device_attach(kingsun->netdev); return 0; } #endif /* * USB device callbacks */ static struct usb_driver irda_driver = { .name = "ksdazzle-sir", .probe = ksdazzle_probe, .disconnect = ksdazzle_disconnect, .id_table = dongles, #ifdef CONFIG_PM .suspend = ksdazzle_suspend, .resume = ksdazzle_resume, #endif }; /* * Module insertion */ static int __init ksdazzle_init(void) { return usb_register(&irda_driver); } module_init(ksdazzle_init); /* * Module removal */ static void __exit ksdazzle_cleanup(void) { /* Deregister the driver and remove all pending instances */ usb_deregister(&irda_driver); } module_exit(ksdazzle_cleanup); MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>"); MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle"); MODULE_LICENSE("GPL");
gpl-2.0
mrimp/SM-G928T_Kernel
drivers/dma/dmatest.c
2133
33783
/* * DMA Engine test module * * Copyright (C) 2007 Atmel Corporation * Copyright (C) 2013 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/freezer.h> #include <linux/init.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/seq_file.h> static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO); MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer"); static char test_channel[20]; module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO); MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); static char test_device[20]; module_param_string(device, test_device, sizeof(test_device), S_IRUGO); MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)"); static unsigned int threads_per_chan = 1; module_param(threads_per_chan, uint, S_IRUGO); MODULE_PARM_DESC(threads_per_chan, "Number of threads to start per channel (default: 1)"); static unsigned int max_channels; module_param(max_channels, uint, S_IRUGO); MODULE_PARM_DESC(max_channels, "Maximum number of channels to use (default: all)"); static unsigned int iterations; module_param(iterations, uint, S_IRUGO); MODULE_PARM_DESC(iterations, "Iterations before stopping test (default: infinite)"); static unsigned int xor_sources = 3; module_param(xor_sources, uint, S_IRUGO); MODULE_PARM_DESC(xor_sources, "Number of xor source buffers (default: 3)"); static unsigned int pq_sources = 3; module_param(pq_sources, uint, S_IRUGO); MODULE_PARM_DESC(pq_sources, "Number of p+q source buffers 
(default: 3)"); static int timeout = 3000; module_param(timeout, uint, S_IRUGO); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " "Pass -1 for infinite timeout"); /* Maximum amount of mismatched bytes in buffer to print */ #define MAX_ERROR_COUNT 32 /* * Initialization patterns. All bytes in the source buffer has bit 7 * set, all bytes in the destination buffer has bit 7 cleared. * * Bit 6 is set for all bytes which are to be copied by the DMA * engine. Bit 5 is set for all bytes which are to be overwritten by * the DMA engine. * * The remaining bits are the inverse of a counter which increments by * one for each byte address. */ #define PATTERN_SRC 0x80 #define PATTERN_DST 0x00 #define PATTERN_COPY 0x40 #define PATTERN_OVERWRITE 0x20 #define PATTERN_COUNT_MASK 0x1f enum dmatest_error_type { DMATEST_ET_OK, DMATEST_ET_MAP_SRC, DMATEST_ET_MAP_DST, DMATEST_ET_PREP, DMATEST_ET_SUBMIT, DMATEST_ET_TIMEOUT, DMATEST_ET_DMA_ERROR, DMATEST_ET_DMA_IN_PROGRESS, DMATEST_ET_VERIFY, DMATEST_ET_VERIFY_BUF, }; struct dmatest_verify_buffer { unsigned int index; u8 expected; u8 actual; }; struct dmatest_verify_result { unsigned int error_count; struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; u8 pattern; bool is_srcbuf; }; struct dmatest_thread_result { struct list_head node; unsigned int n; unsigned int src_off; unsigned int dst_off; unsigned int len; enum dmatest_error_type type; union { unsigned long data; dma_cookie_t cookie; enum dma_status status; int error; struct dmatest_verify_result *vr; }; }; struct dmatest_result { struct list_head node; char *name; struct list_head results; }; struct dmatest_info; struct dmatest_thread { struct list_head node; struct dmatest_info *info; struct task_struct *task; struct dma_chan *chan; u8 **srcs; u8 **dsts; enum dma_transaction_type type; bool done; }; struct dmatest_chan { struct list_head node; struct dma_chan *chan; struct list_head threads; }; /** * struct dmatest_params - test parameters. 
* @buf_size: size of the memcpy test buffer * @channel: bus ID of the channel to test * @device: bus ID of the DMA Engine to test * @threads_per_chan: number of threads to start per channel * @max_channels: maximum number of channels to use * @iterations: iterations before stopping test * @xor_sources: number of xor source buffers * @pq_sources: number of p+q source buffers * @timeout: transfer timeout in msec, -1 for infinite timeout */ struct dmatest_params { unsigned int buf_size; char channel[20]; char device[20]; unsigned int threads_per_chan; unsigned int max_channels; unsigned int iterations; unsigned int xor_sources; unsigned int pq_sources; int timeout; }; /** * struct dmatest_info - test information. * @params: test parameters * @lock: access protection to the fields of this structure */ struct dmatest_info { /* Test parameters */ struct dmatest_params params; /* Internal state */ struct list_head channels; unsigned int nr_channels; struct mutex lock; /* debugfs related stuff */ struct dentry *root; struct dmatest_params dbgfs_params; /* Test results */ struct list_head results; struct mutex results_lock; }; static struct dmatest_info test_info; static bool dmatest_match_channel(struct dmatest_params *params, struct dma_chan *chan) { if (params->channel[0] == '\0') return true; return strcmp(dma_chan_name(chan), params->channel) == 0; } static bool dmatest_match_device(struct dmatest_params *params, struct dma_device *device) { if (params->device[0] == '\0') return true; return strcmp(dev_name(device->dev), params->device) == 0; } static unsigned long dmatest_random(void) { unsigned long buf; get_random_bytes(&buf, sizeof(buf)); return buf; } static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, unsigned int buf_size) { unsigned int i; u8 *buf; for (; (buf = *bufs); bufs++) { for (i = 0; i < start; i++) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); for ( ; i < start + len; i++) buf[i] = PATTERN_SRC | PATTERN_COPY | (~i & 
PATTERN_COUNT_MASK); for ( ; i < buf_size; i++) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); buf++; } } static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, unsigned int buf_size) { unsigned int i; u8 *buf; for (; (buf = *bufs); bufs++) { for (i = 0; i < start; i++) buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); for ( ; i < start + len; i++) buf[i] = PATTERN_DST | PATTERN_OVERWRITE | (~i & PATTERN_COUNT_MASK); for ( ; i < buf_size; i++) buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); } } static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, unsigned int start, unsigned int end, unsigned int counter, u8 pattern, bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; u8 actual; u8 expected; u8 *buf; unsigned int counter_orig = counter; struct dmatest_verify_buffer *vb; for (; (buf = *bufs); bufs++) { counter = counter_orig; for (i = start; i < end; i++) { actual = buf[i]; expected = pattern | (~counter & PATTERN_COUNT_MASK); if (actual != expected) { if (error_count < MAX_ERROR_COUNT && vr) { vb = &vr->data[error_count]; vb->index = i; vb->expected = expected; vb->actual = actual; } error_count++; } counter++; } } if (error_count > MAX_ERROR_COUNT) pr_warning("%s: %u errors suppressed\n", current->comm, error_count - MAX_ERROR_COUNT); return error_count; } /* poor man's completion - we want to use wait_event_freezable() on it */ struct dmatest_done { bool done; wait_queue_head_t *wait; }; static void dmatest_callback(void *arg) { struct dmatest_done *done = arg; done->done = true; wake_up_all(done->wait); } static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len, unsigned int count) { while (count--) dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE); } static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len, unsigned int count) { while (count--) dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL); } static unsigned int min_odd(unsigned int x, 
unsigned int y) { unsigned int val = min(x, y); return val % 2 ? val : val - 1; } static char *verify_result_get_one(struct dmatest_verify_result *vr, unsigned int i) { struct dmatest_verify_buffer *vb = &vr->data[i]; u8 diff = vb->actual ^ vr->pattern; static char buf[512]; char *msg; if (vr->is_srcbuf) msg = "srcbuf overwritten!"; else if ((vr->pattern & PATTERN_COPY) && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) msg = "dstbuf not copied!"; else if (diff & PATTERN_SRC) msg = "dstbuf was copied!"; else msg = "dstbuf mismatch!"; snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, vb->index, vb->expected, vb->actual); return buf; } static char *thread_result_get(const char *name, struct dmatest_thread_result *tr) { static const char * const messages[] = { [DMATEST_ET_OK] = "No errors", [DMATEST_ET_MAP_SRC] = "src mapping error", [DMATEST_ET_MAP_DST] = "dst mapping error", [DMATEST_ET_PREP] = "prep error", [DMATEST_ET_SUBMIT] = "submit error", [DMATEST_ET_TIMEOUT] = "test timed out", [DMATEST_ET_DMA_ERROR] = "got completion callback (DMA_ERROR)", [DMATEST_ET_DMA_IN_PROGRESS] = "got completion callback (DMA_IN_PROGRESS)", [DMATEST_ET_VERIFY] = "errors", [DMATEST_ET_VERIFY_BUF] = "verify errors", }; static char buf[512]; snprintf(buf, sizeof(buf) - 1, "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, tr->len, tr->data); return buf; } static int thread_result_add(struct dmatest_info *info, struct dmatest_result *r, enum dmatest_error_type type, unsigned int n, unsigned int src_off, unsigned int dst_off, unsigned int len, unsigned long data) { struct dmatest_thread_result *tr; tr = kzalloc(sizeof(*tr), GFP_KERNEL); if (!tr) return -ENOMEM; tr->type = type; tr->n = n; tr->src_off = src_off; tr->dst_off = dst_off; tr->len = len; tr->data = data; mutex_lock(&info->results_lock); list_add_tail(&tr->node, &r->results); mutex_unlock(&info->results_lock); pr_warn("%s\n", 
thread_result_get(r->name, tr)); return 0; } static unsigned int verify_result_add(struct dmatest_info *info, struct dmatest_result *r, unsigned int n, unsigned int src_off, unsigned int dst_off, unsigned int len, u8 **bufs, int whence, unsigned int counter, u8 pattern, bool is_srcbuf) { struct dmatest_verify_result *vr; unsigned int error_count; unsigned int buf_off = is_srcbuf ? src_off : dst_off; unsigned int start, end; if (whence < 0) { start = 0; end = buf_off; } else if (whence > 0) { start = buf_off + len; end = info->params.buf_size; } else { start = buf_off; end = buf_off + len; } vr = kmalloc(sizeof(*vr), GFP_KERNEL); if (!vr) { pr_warn("dmatest: No memory to store verify result\n"); return dmatest_verify(NULL, bufs, start, end, counter, pattern, is_srcbuf); } vr->pattern = pattern; vr->is_srcbuf = is_srcbuf; error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, is_srcbuf); if (error_count) { vr->error_count = error_count; thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, dst_off, len, (unsigned long)vr); return error_count; } kfree(vr); return 0; } static void result_free(struct dmatest_info *info, const char *name) { struct dmatest_result *r, *_r; mutex_lock(&info->results_lock); list_for_each_entry_safe(r, _r, &info->results, node) { struct dmatest_thread_result *tr, *_tr; if (name && strcmp(r->name, name)) continue; list_for_each_entry_safe(tr, _tr, &r->results, node) { if (tr->type == DMATEST_ET_VERIFY_BUF) kfree(tr->vr); list_del(&tr->node); kfree(tr); } kfree(r->name); list_del(&r->node); kfree(r); } mutex_unlock(&info->results_lock); } static struct dmatest_result *result_init(struct dmatest_info *info, const char *name) { struct dmatest_result *r; r = kzalloc(sizeof(*r), GFP_KERNEL); if (r) { r->name = kstrdup(name, GFP_KERNEL); INIT_LIST_HEAD(&r->results); mutex_lock(&info->results_lock); list_add_tail(&r->node, &info->results); mutex_unlock(&info->results_lock); } return r; } /* * This function repeatedly tests DMA 
transfers of various lengths and * offsets for a given operation type until it is told to exit by * kthread_stop(). There may be multiple threads running this function * in parallel for a single channel, and there may be multiple channels * being tested in parallel. * * Before each test, the source and destination buffer is initialized * with a known pattern. This pattern is different depending on * whether it's in an area which is supposed to be copied or * overwritten, and different in the source and destination buffers. * So if the DMA engine doesn't copy exactly what we tell it to copy, * we'll notice. */ static int dmatest_func(void *data) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); struct dmatest_thread *thread = data; struct dmatest_done done = { .wait = &done_wait }; struct dmatest_info *info; struct dmatest_params *params; struct dma_chan *chan; struct dma_device *dev; const char *thread_name; unsigned int src_off, dst_off, len; unsigned int error_count; unsigned int failed_tests = 0; unsigned int total_tests = 0; dma_cookie_t cookie; enum dma_status status; enum dma_ctrl_flags flags; u8 *pq_coefs = NULL; int ret; int src_cnt; int dst_cnt; int i; struct dmatest_result *result; thread_name = current->comm; set_freezable(); ret = -ENOMEM; smp_rmb(); info = thread->info; params = &info->params; chan = thread->chan; dev = chan->device; if (thread->type == DMA_MEMCPY) src_cnt = dst_cnt = 1; else if (thread->type == DMA_XOR) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); dst_cnt = 1; } else if (thread->type == DMA_PQ) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); dst_cnt = 2; pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); if (!pq_coefs) goto err_thread_type; for (i = 0; i < src_cnt; i++) pq_coefs[i] = 1; } else goto err_thread_type; result = result_init(info, thread_name); if (!result) goto err_srcs; thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), 
GFP_KERNEL); if (!thread->srcs) goto err_srcs; for (i = 0; i < src_cnt; i++) { thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); if (!thread->srcs[i]) goto err_srcbuf; } thread->srcs[i] = NULL; thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); if (!thread->dsts) goto err_dsts; for (i = 0; i < dst_cnt; i++) { thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); if (!thread->dsts[i]) goto err_dstbuf; } thread->dsts[i] = NULL; set_user_nice(current, 10); /* * src buffers are freed by the DMAEngine code with dma_unmap_single() * dst buffers are freed by ourselves below */ flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; while (!kthread_should_stop() && !(params->iterations && total_tests >= params->iterations)) { struct dma_async_tx_descriptor *tx = NULL; dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_dsts[dst_cnt]; u8 align = 0; total_tests++; /* honor alignment restrictions */ if (thread->type == DMA_MEMCPY) align = dev->copy_align; else if (thread->type == DMA_XOR) align = dev->xor_align; else if (thread->type == DMA_PQ) align = dev->pq_align; if (1 << align > params->buf_size) { pr_err("%u-byte buffer too small for %d-byte alignment\n", params->buf_size, 1 << align); break; } len = dmatest_random() % params->buf_size + 1; len = (len >> align) << align; if (!len) len = 1 << align; src_off = dmatest_random() % (params->buf_size - len + 1); dst_off = dmatest_random() % (params->buf_size - len + 1); src_off = (src_off >> align) << align; dst_off = (dst_off >> align) << align; dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); for (i = 0; i < src_cnt; i++) { u8 *buf = thread->srcs[i] + src_off; dma_srcs[i] = dma_map_single(dev->dev, buf, len, DMA_TO_DEVICE); ret = dma_mapping_error(dev->dev, dma_srcs[i]); if (ret) { unmap_src(dev->dev, dma_srcs, len, i); thread_result_add(info, result, DMATEST_ET_MAP_SRC, total_tests, 
src_off, dst_off, len, ret); failed_tests++; continue; } } /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ for (i = 0; i < dst_cnt; i++) { dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], params->buf_size, DMA_BIDIRECTIONAL); ret = dma_mapping_error(dev->dev, dma_dsts[i]); if (ret) { unmap_src(dev->dev, dma_srcs, len, src_cnt); unmap_dst(dev->dev, dma_dsts, params->buf_size, i); thread_result_add(info, result, DMATEST_ET_MAP_DST, total_tests, src_off, dst_off, len, ret); failed_tests++; continue; } } if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, dma_dsts[0] + dst_off, dma_srcs[0], len, flags); else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, dma_dsts[0] + dst_off, dma_srcs, src_cnt, len, flags); else if (thread->type == DMA_PQ) { dma_addr_t dma_pq[dst_cnt]; for (i = 0; i < dst_cnt; i++) dma_pq[i] = dma_dsts[i] + dst_off; tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, src_cnt, pq_coefs, len, flags); } if (!tx) { unmap_src(dev->dev, dma_srcs, len, src_cnt); unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); thread_result_add(info, result, DMATEST_ET_PREP, total_tests, src_off, dst_off, len, 0); msleep(100); failed_tests++; continue; } done.done = false; tx->callback = dmatest_callback; tx->callback_param = &done; cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { thread_result_add(info, result, DMATEST_ET_SUBMIT, total_tests, src_off, dst_off, len, cookie); msleep(100); failed_tests++; continue; } dma_async_issue_pending(chan); wait_event_freezable_timeout(done_wait, done.done, msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); if (!done.done) { /* * We're leaving the timed out dma operation with * dangling pointer to done_wait. To make this * correct, we'll need to allocate wait_done for * each test iteration and perform "who's gonna * free it this time?" dancing. For now, just * leave it dangling. 
*/ thread_result_add(info, result, DMATEST_ET_TIMEOUT, total_tests, src_off, dst_off, len, 0); failed_tests++; continue; } else if (status != DMA_SUCCESS) { enum dmatest_error_type type = (status == DMA_ERROR) ? DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; thread_result_add(info, result, type, total_tests, src_off, dst_off, len, status); failed_tests++; continue; } /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); error_count = 0; pr_debug("%s: verifying source buffer...\n", thread_name); error_count += verify_result_add(info, result, total_tests, src_off, dst_off, len, thread->srcs, -1, 0, PATTERN_SRC, true); error_count += verify_result_add(info, result, total_tests, src_off, dst_off, len, thread->srcs, 0, src_off, PATTERN_SRC | PATTERN_COPY, true); error_count += verify_result_add(info, result, total_tests, src_off, dst_off, len, thread->srcs, 1, src_off + len, PATTERN_SRC, true); pr_debug("%s: verifying dest buffer...\n", thread_name); error_count += verify_result_add(info, result, total_tests, src_off, dst_off, len, thread->dsts, -1, 0, PATTERN_DST, false); error_count += verify_result_add(info, result, total_tests, src_off, dst_off, len, thread->dsts, 0, src_off, PATTERN_SRC | PATTERN_COPY, false); error_count += verify_result_add(info, result, total_tests, src_off, dst_off, len, thread->dsts, 1, dst_off + len, PATTERN_DST, false); if (error_count) { thread_result_add(info, result, DMATEST_ET_VERIFY, total_tests, src_off, dst_off, len, error_count); failed_tests++; } else { thread_result_add(info, result, DMATEST_ET_OK, total_tests, src_off, dst_off, len, 0); } } ret = 0; for (i = 0; thread->dsts[i]; i++) kfree(thread->dsts[i]); err_dstbuf: kfree(thread->dsts); err_dsts: for (i = 0; thread->srcs[i]; i++) kfree(thread->srcs[i]); err_srcbuf: kfree(thread->srcs); err_srcs: kfree(pq_coefs); err_thread_type: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, 
total_tests, failed_tests, ret); /* terminate all transfers on specified channels */ if (ret) dmaengine_terminate_all(chan); thread->done = true; if (params->iterations > 0) while (!kthread_should_stop()) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); interruptible_sleep_on(&wait_dmatest_exit); } return ret; } static void dmatest_cleanup_channel(struct dmatest_chan *dtc) { struct dmatest_thread *thread; struct dmatest_thread *_thread; int ret; list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { ret = kthread_stop(thread->task); pr_debug("dmatest: thread %s exited with status %d\n", thread->task->comm, ret); list_del(&thread->node); kfree(thread); } /* terminate all transfers on specified channels */ dmaengine_terminate_all(dtc->chan); kfree(dtc); } static int dmatest_add_threads(struct dmatest_info *info, struct dmatest_chan *dtc, enum dma_transaction_type type) { struct dmatest_params *params = &info->params; struct dmatest_thread *thread; struct dma_chan *chan = dtc->chan; char *op; unsigned int i; if (type == DMA_MEMCPY) op = "copy"; else if (type == DMA_XOR) op = "xor"; else if (type == DMA_PQ) op = "pq"; else return -EINVAL; for (i = 0; i < params->threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { pr_warning("dmatest: No memory for %s-%s%u\n", dma_chan_name(chan), op, i); break; } thread->info = info; thread->chan = dtc->chan; thread->type = type; smp_wmb(); thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", dma_chan_name(chan), op, i); if (IS_ERR(thread->task)) { pr_warning("dmatest: Failed to run thread %s-%s%u\n", dma_chan_name(chan), op, i); kfree(thread); break; } /* srcbuf and dstbuf are allocated by the thread itself */ list_add_tail(&thread->node, &dtc->threads); } return i; } static int dmatest_add_channel(struct dmatest_info *info, struct dma_chan *chan) { struct dmatest_chan *dtc; struct dma_device *dma_dev = chan->device; unsigned int thread_count = 0; int cnt; dtc = 
kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); if (!dtc) { pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); return -ENOMEM; } dtc->chan = chan; INIT_LIST_HEAD(&dtc->threads); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); thread_count += cnt > 0 ? cnt : 0; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { cnt = dmatest_add_threads(info, dtc, DMA_XOR); thread_count += cnt > 0 ? cnt : 0; } if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { cnt = dmatest_add_threads(info, dtc, DMA_PQ); thread_count += cnt > 0 ? cnt : 0; } pr_info("dmatest: Started %u threads using %s\n", thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &info->channels); info->nr_channels++; return 0; } static bool filter(struct dma_chan *chan, void *param) { struct dmatest_params *params = param; if (!dmatest_match_channel(params, chan) || !dmatest_match_device(params, chan->device)) return false; else return true; } static int __run_threaded_test(struct dmatest_info *info) { dma_cap_mask_t mask; struct dma_chan *chan; struct dmatest_params *params = &info->params; int err = 0; dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); for (;;) { chan = dma_request_channel(mask, filter, params); if (chan) { err = dmatest_add_channel(info, chan); if (err) { dma_release_channel(chan); break; /* add_channel failed, punt */ } } else break; /* no more channels available */ if (params->max_channels && info->nr_channels >= params->max_channels) break; /* we have all we need */ } return err; } #ifndef MODULE static int run_threaded_test(struct dmatest_info *info) { int ret; mutex_lock(&info->lock); ret = __run_threaded_test(info); mutex_unlock(&info->lock); return ret; } #endif static void __stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; struct dma_chan *chan; list_for_each_entry_safe(dtc, _dtc, &info->channels, node) { list_del(&dtc->node); chan = dtc->chan; dmatest_cleanup_channel(dtc); pr_debug("dmatest: 
dropped channel %s\n", dma_chan_name(chan)); dma_release_channel(chan); } info->nr_channels = 0; } static void stop_threaded_test(struct dmatest_info *info) { mutex_lock(&info->lock); __stop_threaded_test(info); mutex_unlock(&info->lock); } static int __restart_threaded_test(struct dmatest_info *info, bool run) { struct dmatest_params *params = &info->params; /* Stop any running test first */ __stop_threaded_test(info); if (run == false) return 0; /* Clear results from previous run */ result_free(info, NULL); /* Copy test parameters */ memcpy(params, &info->dbgfs_params, sizeof(*params)); /* Run test with new parameters */ return __run_threaded_test(info); } static bool __is_threaded_test_run(struct dmatest_info *info) { struct dmatest_chan *dtc; list_for_each_entry(dtc, &info->channels, node) { struct dmatest_thread *thread; list_for_each_entry(thread, &dtc->threads, node) { if (!thread->done) return true; } } return false; } static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, const void __user *from, size_t count) { char tmp[20]; ssize_t len; len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count); if (len >= 0) { tmp[len] = '\0'; strlcpy(to, strim(tmp), available); } return len; } static ssize_t dtf_read_channel(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct dmatest_info *info = file->private_data; return simple_read_from_buffer(buf, count, ppos, info->dbgfs_params.channel, strlen(info->dbgfs_params.channel)); } static ssize_t dtf_write_channel(struct file *file, const char __user *buf, size_t size, loff_t *ppos) { struct dmatest_info *info = file->private_data; return dtf_write_string(info->dbgfs_params.channel, sizeof(info->dbgfs_params.channel), ppos, buf, size); } static const struct file_operations dtf_channel_fops = { .read = dtf_read_channel, .write = dtf_write_channel, .open = simple_open, .llseek = default_llseek, }; static ssize_t dtf_read_device(struct file *file, char __user *buf, size_t 
count, loff_t *ppos) { struct dmatest_info *info = file->private_data; return simple_read_from_buffer(buf, count, ppos, info->dbgfs_params.device, strlen(info->dbgfs_params.device)); } static ssize_t dtf_write_device(struct file *file, const char __user *buf, size_t size, loff_t *ppos) { struct dmatest_info *info = file->private_data; return dtf_write_string(info->dbgfs_params.device, sizeof(info->dbgfs_params.device), ppos, buf, size); } static const struct file_operations dtf_device_fops = { .read = dtf_read_device, .write = dtf_write_device, .open = simple_open, .llseek = default_llseek, }; static ssize_t dtf_read_run(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct dmatest_info *info = file->private_data; char buf[3]; mutex_lock(&info->lock); if (__is_threaded_test_run(info)) { buf[0] = 'Y'; } else { __stop_threaded_test(info); buf[0] = 'N'; } mutex_unlock(&info->lock); buf[1] = '\n'; buf[2] = 0x00; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct dmatest_info *info = file->private_data; char buf[16]; bool bv; int ret = 0; if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) return -EFAULT; if (strtobool(buf, &bv) == 0) { mutex_lock(&info->lock); if (__is_threaded_test_run(info)) ret = -EBUSY; else ret = __restart_threaded_test(info, bv); mutex_unlock(&info->lock); } return ret ? 
ret : count; } static const struct file_operations dtf_run_fops = { .read = dtf_read_run, .write = dtf_write_run, .open = simple_open, .llseek = default_llseek, }; static int dtf_results_show(struct seq_file *sf, void *data) { struct dmatest_info *info = sf->private; struct dmatest_result *result; struct dmatest_thread_result *tr; unsigned int i; mutex_lock(&info->results_lock); list_for_each_entry(result, &info->results, node) { list_for_each_entry(tr, &result->results, node) { seq_printf(sf, "%s\n", thread_result_get(result->name, tr)); if (tr->type == DMATEST_ET_VERIFY_BUF) { for (i = 0; i < tr->vr->error_count; i++) { seq_printf(sf, "\t%s\n", verify_result_get_one(tr->vr, i)); } } } } mutex_unlock(&info->results_lock); return 0; } static int dtf_results_open(struct inode *inode, struct file *file) { return single_open(file, dtf_results_show, inode->i_private); } static const struct file_operations dtf_results_fops = { .open = dtf_results_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dmatest_register_dbgfs(struct dmatest_info *info) { struct dentry *d; struct dmatest_params *params = &info->dbgfs_params; int ret = -ENOMEM; d = debugfs_create_dir("dmatest", NULL); if (IS_ERR(d)) return PTR_ERR(d); if (!d) goto err_root; info->root = d; /* Copy initial values */ memcpy(params, &info->params, sizeof(*params)); /* Test parameters */ d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root, (u32 *)&params->buf_size); if (IS_ERR_OR_NULL(d)) goto err_node; d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root, info, &dtf_channel_fops); if (IS_ERR_OR_NULL(d)) goto err_node; d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root, info, &dtf_device_fops); if (IS_ERR_OR_NULL(d)) goto err_node; d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root, (u32 *)&params->threads_per_chan); if (IS_ERR_OR_NULL(d)) goto err_node; d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, 
info->root, (u32 *)&params->max_channels); if (IS_ERR_OR_NULL(d)) goto err_node; d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root, (u32 *)&params->iterations); if (IS_ERR_OR_NULL(d)) goto err_node; d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root, (u32 *)&params->xor_sources); if (IS_ERR_OR_NULL(d)) goto err_node; d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root, (u32 *)&params->pq_sources); if (IS_ERR_OR_NULL(d)) goto err_node; d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root, (u32 *)&params->timeout); if (IS_ERR_OR_NULL(d)) goto err_node; /* Run or stop threaded test */ d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info, &dtf_run_fops); if (IS_ERR_OR_NULL(d)) goto err_node; /* Results of test in progress */ d = debugfs_create_file("results", S_IRUGO, info->root, info, &dtf_results_fops); if (IS_ERR_OR_NULL(d)) goto err_node; return 0; err_node: debugfs_remove_recursive(info->root); err_root: pr_err("dmatest: Failed to initialize debugfs\n"); return ret; } static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; struct dmatest_params *params = &info->params; int ret; memset(info, 0, sizeof(*info)); mutex_init(&info->lock); INIT_LIST_HEAD(&info->channels); mutex_init(&info->results_lock); INIT_LIST_HEAD(&info->results); /* Set default parameters */ params->buf_size = test_buf_size; strlcpy(params->channel, test_channel, sizeof(params->channel)); strlcpy(params->device, test_device, sizeof(params->device)); params->threads_per_chan = threads_per_chan; params->max_channels = max_channels; params->iterations = iterations; params->xor_sources = xor_sources; params->pq_sources = pq_sources; params->timeout = timeout; ret = dmatest_register_dbgfs(info); if (ret) return ret; #ifdef MODULE return 0; #else return run_threaded_test(info); #endif } /* when compiled-in wait for drivers to load first */ late_initcall(dmatest_init); static void __exit dmatest_exit(void) { 
struct dmatest_info *info = &test_info; debugfs_remove_recursive(info->root); stop_threaded_test(info); result_free(info, NULL); } module_exit(dmatest_exit); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); MODULE_LICENSE("GPL v2");
gpl-2.0
sunnyden/ubuntu_kernel
drivers/rapidio/switches/tsi568.c
2389
5184
/*
 * RapidIO Tsi568 switch support
 *
 * Copyright 2009-2010 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *  - Added EM support
 *  - Modified switch operations initialization.
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/delay.h>
#include <linux/module.h>
#include "../rio.h"

/* Global (broadcast) route registers */
#define SPBC_ROUTE_CFG_DESTID	0x10070
#define SPBC_ROUTE_CFG_PORT	0x10074

/*
 * Per port route registers.
 * NOTE: the macro argument is parenthesized in the expansion; the original
 * code expanded "0x100*n", which silently computes the wrong offset for any
 * non-trivial expression argument (e.g. "port + 1") due to operator
 * precedence.
 */
#define SPP_ROUTE_CFG_DESTID(n)	(0x11070 + 0x100*(n))
#define SPP_ROUTE_CFG_PORT(n)	(0x11074 + 0x100*(n))

#define TSI568_SP_MODE(n)	(0x11004 + 0x100*(n))
#define  TSI568_SP_MODE_PW_DIS	0x08000000

/*
 * Program one routing-table entry, either in the global (broadcast) table
 * or in the per-port table selected by @table.  Always returns 0; the
 * trailing udelay(10) gives the switch time to latch the write.
 */
static int
tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
		       u16 table, u16 route_destid, u8 route_port)
{
	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_write_config_32(mport, destid, hopcount,
					  SPBC_ROUTE_CFG_DESTID, route_destid);
		rio_mport_write_config_32(mport, destid, hopcount,
					  SPBC_ROUTE_CFG_PORT, route_port);
	} else {
		rio_mport_write_config_32(mport, destid, hopcount,
					  SPP_ROUTE_CFG_DESTID(table),
					  route_destid);
		rio_mport_write_config_32(mport, destid, hopcount,
					  SPP_ROUTE_CFG_PORT(table),
					  route_port);
	}

	udelay(10);

	return 0;
}

/*
 * Read back a routing-table entry into *@route_port.  A value > 15 is not
 * a valid Tsi568 egress port, so it is reported as failure (-1) while the
 * raw value is still stored for the caller to inspect.
 */
static int
tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
		       u16 table, u16 route_destid, u8 *route_port)
{
	int ret = 0;
	u32 result;

	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_write_config_32(mport, destid, hopcount,
					  SPBC_ROUTE_CFG_DESTID, route_destid);
		rio_mport_read_config_32(mport, destid, hopcount,
					 SPBC_ROUTE_CFG_PORT, &result);
	} else {
		rio_mport_write_config_32(mport, destid, hopcount,
					  SPP_ROUTE_CFG_DESTID(table),
					  route_destid);
		rio_mport_read_config_32(mport, destid, hopcount,
					 SPP_ROUTE_CFG_PORT(table), &result);
	}

	*route_port = result;
	if (*route_port > 15)
		ret = -1;

	return ret;
}

/*
 * Invalidate every entry of the selected routing table.  The LUT size
 * depends on whether the mport runs in large (16-bit) or small (8-bit)
 * destination-ID mode.  Writing 0x80000000 to the DESTID register enables
 * auto-increment so the loop only needs to hit the PORT register.
 */
static int
tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
		       u16 table)
{
	u32 route_idx;
	u32 lut_size;

	lut_size = (mport->sys_size) ? 0x1ff : 0xff;

	if (table == RIO_GLOBAL_TABLE) {
		rio_mport_write_config_32(mport, destid, hopcount,
					  SPBC_ROUTE_CFG_DESTID, 0x80000000);
		for (route_idx = 0; route_idx <= lut_size; route_idx++)
			rio_mport_write_config_32(mport, destid, hopcount,
						  SPBC_ROUTE_CFG_PORT,
						  RIO_INVALID_ROUTE);
	} else {
		rio_mport_write_config_32(mport, destid, hopcount,
					  SPP_ROUTE_CFG_DESTID(table),
					  0x80000000);
		for (route_idx = 0; route_idx <= lut_size; route_idx++)
			rio_mport_write_config_32(mport, destid, hopcount,
						  SPP_ROUTE_CFG_PORT(table),
						  RIO_INVALID_ROUTE);
	}

	return 0;
}

/*
 * Error-management init: this driver does not handle port-writes, so make
 * sure Port-Write generation is disabled on every port of the switch.
 */
static int
tsi568_em_init(struct rio_dev *rdev)
{
	u32 regval;
	int portnum;

	pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);

	/* Make sure that Port-Writes are disabled (for all ports) */
	for (portnum = 0;
	     portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
		rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval);
		rio_write_config_32(rdev, TSI568_SP_MODE(portnum),
				    regval | TSI568_SP_MODE_PW_DIS);
	}

	return 0;
}

static struct rio_switch_ops tsi568_switch_ops = {
	.owner = THIS_MODULE,
	.add_entry = tsi568_route_add_entry,
	.get_entry = tsi568_route_get_entry,
	.clr_table = tsi568_route_clr_table,
	.set_domain = NULL,
	.get_domain = NULL,
	.em_init = tsi568_em_init,
	.em_handle = NULL,
};

/*
 * Attach this driver's switch operations to the matched device.  The
 * rswitch lock guards against a concurrent driver claiming the switch;
 * -EINVAL is returned if ops are already installed.
 */
static int tsi568_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));

	spin_lock(&rdev->rswitch->lock);

	if (rdev->rswitch->ops) {
		spin_unlock(&rdev->rswitch->lock);
		return -EINVAL;
	}

	rdev->rswitch->ops = &tsi568_switch_ops;
	spin_unlock(&rdev->rswitch->lock);
	return 0;
}

/*
 * Detach on remove, but only if the installed ops are actually ours --
 * another driver may have claimed the switch after a failed probe.
 */
static void tsi568_remove(struct rio_dev *rdev)
{
	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops != &tsi568_switch_ops) {
		spin_unlock(&rdev->rswitch->lock);
		return;
	}
	rdev->rswitch->ops = NULL;
	spin_unlock(&rdev->rswitch->lock);
}

static struct rio_device_id tsi568_id_table[] = {
	{RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)},
	{ 0, }	/* terminate list */
};

static struct rio_driver tsi568_driver = {
	.name = "tsi568",
	.id_table = tsi568_id_table,
	.probe = tsi568_probe,
	.remove = tsi568_remove,
};

static int __init tsi568_init(void)
{
	return rio_register_driver(&tsi568_driver);
}

static void __exit tsi568_exit(void)
{
	rio_unregister_driver(&tsi568_driver);
}

device_initcall(tsi568_init);
module_exit(tsi568_exit);

MODULE_DESCRIPTION("IDT Tsi568 Serial RapidIO switch driver");
MODULE_AUTHOR("Integrated Device Technology, Inc.");
MODULE_LICENSE("GPL");
gpl-2.0
ndmsystems/linux-2.6.36
drivers/net/appletalk/cops.c
3925
29434
/* cops.c: LocalTalk driver for Linux. * * Authors: * - Jay Schulist <jschlst@samba.org> * * With more than a little help from; * - Alan Cox <alan@lxorguk.ukuu.org.uk> * * Derived from: * - skeleton.c: A network driver outline for linux. * Written 1993-94 by Donald Becker. * - ltpc.c: A driver for the LocalTalk PC card. * Written by Bradford W. Johnson. * * Copyright 1993 United States Government as represented by the * Director, National Security Agency. * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * Changes: * 19970608 Alan Cox Allowed dual card type support * Can set board type in insmod * Hooks for cops_setup routine * (not yet implemented). * 19971101 Jay Schulist Fixes for multiple lt* devices. * 19980607 Steven Hirsch Fixed the badly broken support * for Tangent type cards. Only * tested on Daystar LT200. Some * cleanup of formatting and program * logic. Added emacs 'local-vars' * setup for Jay's brace style. * 20000211 Alan Cox Cleaned up for softnet */ static const char *version = "cops.c:v0.04 6/7/98 Jay Schulist <jschlst@samba.org>\n"; /* * Sources: * COPS Localtalk SDK. This provides almost all of the information * needed. */ /* * insmod/modprobe configurable stuff. * - IO Port, choose one your card supports or 0 if you dare. * - IRQ, also choose one your card supports or nothing and let * the driver figure it out. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/if_ltalk.h> #include <linux/delay.h> /* For udelay() */ #include <linux/atalk.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/jiffies.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include "cops.h" /* Our Stuff */ #include "cops_ltdrv.h" /* Firmware code for Tangent type cards. */ #include "cops_ffdrv.h" /* Firmware code for Dayna type cards. */ /* * The name of the card. Is used for messages and in the requests for * io regions, irqs and dma channels */ static const char *cardname = "cops"; #ifdef CONFIG_COPS_DAYNA static int board_type = DAYNA; /* Module exported */ #else static int board_type = TANGENT; #endif static int io = 0x240; /* Default IO for Dayna */ static int irq = 5; /* Default IRQ */ /* * COPS Autoprobe information. * Right now if port address is right but IRQ is not 5 this will * return a 5 no matter what since we will still get a status response. * Need one more additional check to narrow down after we have gotten * the ioaddr. But since only other possible IRQs is 3 and 4 so no real * hurry on this. I *STRONGLY* recommend using IRQ 5 for your card with * this driver. * * This driver has 2 modes and they are: Dayna mode and Tangent mode. * Each mode corresponds with the type of card. It has been found * that there are 2 main types of cards and all other cards are * the same and just have different names or only have minor differences * such as more IO ports. As this driver is tested it will * become more clear on exactly what cards are supported. The driver * defaults to using Dayna mode. 
To change the drivers mode, simply * select Dayna or Tangent mode when configuring the kernel. * * This driver should support: * TANGENT driver mode: * Tangent ATB-II, Novell NL-1000, Daystar Digital LT-200, * COPS LT-1 * DAYNA driver mode: * Dayna DL2000/DaynaTalk PC (Half Length), COPS LT-95, * Farallon PhoneNET PC III, Farallon PhoneNET PC II * Other cards possibly supported mode unknown though: * Dayna DL2000 (Full length), COPS LT/M (Micro-Channel) * * Cards NOT supported by this driver but supported by the ltpc.c * driver written by Bradford W. Johnson <johns393@maroon.tc.umn.edu> * Farallon PhoneNET PC * Original Apple LocalTalk PC card * * N.B. * * The Daystar Digital LT200 boards do not support interrupt-driven * IO. You must specify 'irq=0xff' as a module parameter to invoke * polled mode. I also believe that the port probing logic is quite * dangerous at best and certainly hopeless for a polled card. Best to * specify both. - Steve H. * */ /* * Zero terminated list of IO ports to probe. */ static unsigned int ports[] = { 0x240, 0x340, 0x200, 0x210, 0x220, 0x230, 0x260, 0x2A0, 0x300, 0x310, 0x320, 0x330, 0x350, 0x360, 0 }; /* * Zero terminated list of IRQ ports to probe. */ static int cops_irqlist[] = { 5, 4, 3, 0 }; static struct timer_list cops_timer; /* use 0 for production, 1 for verification, 2 for debug, 3 for verbose debug */ #ifndef COPS_DEBUG #define COPS_DEBUG 1 #endif static unsigned int cops_debug = COPS_DEBUG; /* The number of low I/O ports used by the card. */ #define COPS_IO_EXTENT 8 /* Information that needs to be kept for each board. */ struct cops_local { int board; /* Holds what board type is. */ int nodeid; /* Set to 1 once have nodeid. */ unsigned char node_acquire; /* Node ID when acquired. */ struct atalk_addr node_addr; /* Full node address */ spinlock_t lock; /* RX/TX lock */ }; /* Index to functions, as function prototypes. 
*/ static int cops_probe1 (struct net_device *dev, int ioaddr); static int cops_irq (int ioaddr, int board); static int cops_open (struct net_device *dev); static int cops_jumpstart (struct net_device *dev); static void cops_reset (struct net_device *dev, int sleep); static void cops_load (struct net_device *dev); static int cops_nodeid (struct net_device *dev, int nodeid); static irqreturn_t cops_interrupt (int irq, void *dev_id); static void cops_poll (unsigned long ltdev); static void cops_timeout(struct net_device *dev); static void cops_rx (struct net_device *dev); static netdev_tx_t cops_send_packet (struct sk_buff *skb, struct net_device *dev); static void set_multicast_list (struct net_device *dev); static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static int cops_close (struct net_device *dev); static void cleanup_card(struct net_device *dev) { if (dev->irq) free_irq(dev->irq, dev); release_region(dev->base_addr, COPS_IO_EXTENT); } /* * Check for a network adaptor of this type, and return '0' iff one exists. * If dev->base_addr == 0, probe all likely locations. * If dev->base_addr in [1..0x1ff], always return failure. * otherwise go with what we pass in. */ struct net_device * __init cops_probe(int unit) { struct net_device *dev; unsigned *port; int base_addr; int err = 0; dev = alloc_ltalkdev(sizeof(struct cops_local)); if (!dev) return ERR_PTR(-ENOMEM); if (unit >= 0) { sprintf(dev->name, "lt%d", unit); netdev_boot_setup_check(dev); irq = dev->irq; base_addr = dev->base_addr; } else { base_addr = dev->base_addr = io; } if (base_addr > 0x1ff) { /* Check a single specified location. */ err = cops_probe1(dev, base_addr); } else if (base_addr != 0) { /* Don't probe at all. */ err = -ENXIO; } else { /* FIXME Does this really work for cards which generate irq? * It's definitely N.G. for polled Tangent. sh * Dayna cards don't autoprobe well at all, but if your card is * at IRQ 5 & IO 0x240 we find it every time. 
;) JS */ for (port = ports; *port && cops_probe1(dev, *port) < 0; port++) ; if (!*port) err = -ENODEV; } if (err) goto out; err = register_netdev(dev); if (err) goto out1; return dev; out1: cleanup_card(dev); out: free_netdev(dev); return ERR_PTR(err); } static const struct net_device_ops cops_netdev_ops = { .ndo_open = cops_open, .ndo_stop = cops_close, .ndo_start_xmit = cops_send_packet, .ndo_tx_timeout = cops_timeout, .ndo_do_ioctl = cops_ioctl, .ndo_set_multicast_list = set_multicast_list, }; /* * This is the real probe routine. Linux has a history of friendly device * probes on the ISA bus. A good device probes avoids doing writes, and * verifies that the correct device exists and functions. */ static int __init cops_probe1(struct net_device *dev, int ioaddr) { struct cops_local *lp; static unsigned version_printed; int board = board_type; int retval; if(cops_debug && version_printed++ == 0) printk("%s", version); /* Grab the region so no one else tries to probe our ioports. */ if (!request_region(ioaddr, COPS_IO_EXTENT, dev->name)) return -EBUSY; /* * Since this board has jumpered interrupts, allocate the interrupt * vector now. There is no point in waiting since no other device * can use the interrupt, and this marks the irq as busy. Jumpered * interrupts are typically not reported by the boards, and we must * used AutoIRQ to find them. */ dev->irq = irq; switch (dev->irq) { case 0: /* COPS AutoIRQ routine */ dev->irq = cops_irq(ioaddr, board); if (dev->irq) break; /* No IRQ found on this port, fallthrough */ case 1: retval = -EINVAL; goto err_out; /* Fixup for users that don't know that IRQ 2 is really * IRQ 9, or don't know which one to set. */ case 2: dev->irq = 9; break; /* Polled operation requested. Although irq of zero passed as * a parameter tells the init routines to probe, we'll * overload it to denote polled operation at runtime. */ case 0xff: dev->irq = 0; break; default: break; } /* Reserve any actual interrupt. 
*/ if (dev->irq) { retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev); if (retval) goto err_out; } dev->base_addr = ioaddr; lp = netdev_priv(dev); spin_lock_init(&lp->lock); /* Copy local board variable to lp struct. */ lp->board = board; dev->netdev_ops = &cops_netdev_ops; dev->watchdog_timeo = HZ * 2; /* Tell the user where the card is and what mode we're in. */ if(board==DAYNA) printk("%s: %s at %#3x, using IRQ %d, in Dayna mode.\n", dev->name, cardname, ioaddr, dev->irq); if(board==TANGENT) { if(dev->irq) printk("%s: %s at %#3x, IRQ %d, in Tangent mode\n", dev->name, cardname, ioaddr, dev->irq); else printk("%s: %s at %#3x, using polled IO, in Tangent mode.\n", dev->name, cardname, ioaddr); } return 0; err_out: release_region(ioaddr, COPS_IO_EXTENT); return retval; } static int __init cops_irq (int ioaddr, int board) { /* * This does not use the IRQ to determine where the IRQ is. We just * assume that when we get a correct status response that it's the IRQ. * This really just verifies the IO port but since we only have access * to such a small number of IRQs (5, 4, 3) this is not bad. * This will probably not work for more than one card. */ int irqaddr=0; int i, x, status; if(board==DAYNA) { outb(0, ioaddr+DAYNA_RESET); inb(ioaddr+DAYNA_RESET); mdelay(333); } if(board==TANGENT) { inb(ioaddr); outb(0, ioaddr); outb(0, ioaddr+TANG_RESET); } for(i=0; cops_irqlist[i] !=0; i++) { irqaddr = cops_irqlist[i]; for(x = 0xFFFF; x>0; x --) /* wait for response */ { if(board==DAYNA) { status = (inb(ioaddr+DAYNA_CARD_STATUS)&3); if(status == 1) return irqaddr; } if(board==TANGENT) { if((inb(ioaddr+TANG_CARD_STATUS)& TANG_TX_READY) !=0) return irqaddr; } } } return 0; /* no IRQ found */ } /* * Open/initialize the board. This is called (in the current kernel) * sometime after booting when the 'ifconfig' program is run. 
*/ static int cops_open(struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); if(dev->irq==0) { /* * I don't know if the Dayna-style boards support polled * operation. For now, only allow it for Tangent. */ if(lp->board==TANGENT) /* Poll 20 times per second */ { init_timer(&cops_timer); cops_timer.function = cops_poll; cops_timer.data = (unsigned long)dev; cops_timer.expires = jiffies + HZ/20; add_timer(&cops_timer); } else { printk(KERN_WARNING "%s: No irq line set\n", dev->name); return -EAGAIN; } } cops_jumpstart(dev); /* Start the card up. */ netif_start_queue(dev); return 0; } /* * This allows for a dynamic start/restart of the entire card. */ static int cops_jumpstart(struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); /* * Once the card has the firmware loaded and has acquired * the nodeid, if it is reset it will lose it all. */ cops_reset(dev,1); /* Need to reset card before load firmware. */ cops_load(dev); /* Load the firmware. */ /* * If atalkd already gave us a nodeid we will use that * one again, else we wait for atalkd to give us a nodeid * in cops_ioctl. This may cause a problem if someone steals * our nodeid while we are resetting. */ if(lp->nodeid == 1) cops_nodeid(dev,lp->node_acquire); return 0; } static void tangent_wait_reset(int ioaddr) { int timeout=0; while(timeout++ < 5 && (inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) mdelay(1); /* Wait 1 second */ } /* * Reset the LocalTalk board. */ static void cops_reset(struct net_device *dev, int sleep) { struct cops_local *lp = netdev_priv(dev); int ioaddr=dev->base_addr; if(lp->board==TANGENT) { inb(ioaddr); /* Clear request latch. */ outb(0,ioaddr); /* Clear the TANG_TX_READY flop. */ outb(0, ioaddr+TANG_RESET); /* Reset the adapter. 
*/ tangent_wait_reset(ioaddr); outb(0, ioaddr+TANG_CLEAR_INT); } if(lp->board==DAYNA) { outb(0, ioaddr+DAYNA_RESET); /* Assert the reset port */ inb(ioaddr+DAYNA_RESET); /* Clear the reset */ if (sleep) msleep(333); else mdelay(333); } netif_wake_queue(dev); } static void cops_load (struct net_device *dev) { struct ifreq ifr; struct ltfirmware *ltf= (struct ltfirmware *)&ifr.ifr_ifru; struct cops_local *lp = netdev_priv(dev); int ioaddr=dev->base_addr; int length, i = 0; strcpy(ifr.ifr_name,"lt0"); /* Get card's firmware code and do some checks on it. */ #ifdef CONFIG_COPS_DAYNA if(lp->board==DAYNA) { ltf->length=sizeof(ffdrv_code); ltf->data=ffdrv_code; } else #endif #ifdef CONFIG_COPS_TANGENT if(lp->board==TANGENT) { ltf->length=sizeof(ltdrv_code); ltf->data=ltdrv_code; } else #endif { printk(KERN_INFO "%s; unsupported board type.\n", dev->name); return; } /* Check to make sure firmware is correct length. */ if(lp->board==DAYNA && ltf->length!=5983) { printk(KERN_WARNING "%s: Firmware is not length of FFDRV.BIN.\n", dev->name); return; } if(lp->board==TANGENT && ltf->length!=2501) { printk(KERN_WARNING "%s: Firmware is not length of DRVCODE.BIN.\n", dev->name); return; } if(lp->board==DAYNA) { /* * We must wait for a status response * with the DAYNA board. */ while(++i<65536) { if((inb(ioaddr+DAYNA_CARD_STATUS)&3)==1) break; } if(i==65536) return; } /* * Upload the firmware and kick. Byte-by-byte works nicely here. */ i=0; length = ltf->length; while(length--) { outb(ltf->data[i], ioaddr); i++; } if(cops_debug > 1) printk("%s: Uploaded firmware - %d bytes of %d bytes.\n", dev->name, i, ltf->length); if(lp->board==DAYNA) /* Tell Dayna to run the firmware code. */ outb(1, ioaddr+DAYNA_INT_CARD); else /* Tell Tang to run the firmware code. */ inb(ioaddr); if(lp->board==TANGENT) { tangent_wait_reset(ioaddr); inb(ioaddr); /* Clear initial ready signal. */ } } /* * Get the LocalTalk Nodeid from the card. We can suggest * any nodeid 1-254. 
The card will try and get that exact * address else we can specify 0 as the nodeid and the card * will autoprobe for a nodeid. */ static int cops_nodeid (struct net_device *dev, int nodeid) { struct cops_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; if(lp->board == DAYNA) { /* Empty any pending adapter responses. */ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0) { outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupts. */ if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST) cops_rx(dev); /* Kick any packets waiting. */ schedule(); } outb(2, ioaddr); /* Output command packet length as 2. */ outb(0, ioaddr); outb(LAP_INIT, ioaddr); /* Send LAP_INIT command byte. */ outb(nodeid, ioaddr); /* Suggest node address. */ } if(lp->board == TANGENT) { /* Empty any pending adapter responses. */ while(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY) { outb(0, ioaddr+COPS_CLEAR_INT); /* Clear interrupt. */ cops_rx(dev); /* Kick out packets waiting. */ schedule(); } /* Not sure what Tangent does if nodeid picked is used. */ if(nodeid == 0) /* Seed. */ nodeid = jiffies&0xFF; /* Get a random try */ outb(2, ioaddr); /* Command length LSB */ outb(0, ioaddr); /* Command length MSB */ outb(LAP_INIT, ioaddr); /* Send LAP_INIT byte */ outb(nodeid, ioaddr); /* LAP address hint. */ outb(0xFF, ioaddr); /* Int. level to use */ } lp->node_acquire=0; /* Set nodeid holder to 0. */ while(lp->node_acquire==0) /* Get *True* nodeid finally. */ { outb(0, ioaddr+COPS_CLEAR_INT); /* Clear any interrupt. */ if(lp->board == DAYNA) { if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_REQUEST) cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */ } if(lp->board == TANGENT) { if(inb(ioaddr+TANG_CARD_STATUS)&TANG_RX_READY) cops_rx(dev); /* Grab the nodeid put in lp->node_acquire. */ } schedule(); } if(cops_debug > 1) printk(KERN_DEBUG "%s: Node ID %d has been acquired.\n", dev->name, lp->node_acquire); lp->nodeid=1; /* Set got nodeid to 1. 
*/ return 0; } /* * Poll the Tangent type cards to see if we have work. */ static void cops_poll(unsigned long ltdev) { int ioaddr, status; int boguscount = 0; struct net_device *dev = (struct net_device *)ltdev; del_timer(&cops_timer); if(dev == NULL) return; /* We've been downed */ ioaddr = dev->base_addr; do { status=inb(ioaddr+TANG_CARD_STATUS); if(status & TANG_RX_READY) cops_rx(dev); if(status & TANG_TX_READY) netif_wake_queue(dev); status = inb(ioaddr+TANG_CARD_STATUS); } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY))); /* poll 20 times per second */ cops_timer.expires = jiffies + HZ/20; add_timer(&cops_timer); } /* * The typical workload of the driver: * Handle the network interface interrupts. */ static irqreturn_t cops_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct cops_local *lp; int ioaddr, status; int boguscount = 0; ioaddr = dev->base_addr; lp = netdev_priv(dev); if(lp->board==DAYNA) { do { outb(0, ioaddr + COPS_CLEAR_INT); status=inb(ioaddr+DAYNA_CARD_STATUS); if((status&0x03)==DAYNA_RX_REQUEST) cops_rx(dev); netif_wake_queue(dev); } while(++boguscount < 20); } else { do { status=inb(ioaddr+TANG_CARD_STATUS); if(status & TANG_RX_READY) cops_rx(dev); if(status & TANG_TX_READY) netif_wake_queue(dev); status=inb(ioaddr+TANG_CARD_STATUS); } while((++boguscount < 20) && (status&(TANG_RX_READY|TANG_TX_READY))); } return IRQ_HANDLED; } /* * We have a good packet(s), get it/them out of the buffers. */ static void cops_rx(struct net_device *dev) { int pkt_len = 0; int rsp_type = 0; struct sk_buff *skb = NULL; struct cops_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int boguscount = 0; unsigned long flags; spin_lock_irqsave(&lp->lock, flags); if(lp->board==DAYNA) { outb(0, ioaddr); /* Send out Zero length. */ outb(0, ioaddr); outb(DATA_READ, ioaddr); /* Send read command out. */ /* Wait for DMA to turn around. 
*/ while(++boguscount<1000000) { barrier(); if((inb(ioaddr+DAYNA_CARD_STATUS)&0x03)==DAYNA_RX_READY) break; } if(boguscount==1000000) { printk(KERN_WARNING "%s: DMA timed out.\n",dev->name); spin_unlock_irqrestore(&lp->lock, flags); return; } } /* Get response length. */ if(lp->board==DAYNA) pkt_len = inb(ioaddr) & 0xFF; else pkt_len = inb(ioaddr) & 0x00FF; pkt_len |= (inb(ioaddr) << 8); /* Input IO code. */ rsp_type=inb(ioaddr); /* Malloc up new buffer. */ skb = dev_alloc_skb(pkt_len); if(skb == NULL) { printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; while(pkt_len--) /* Discard packet */ inb(ioaddr); spin_unlock_irqrestore(&lp->lock, flags); return; } skb->dev = dev; skb_put(skb, pkt_len); skb->protocol = htons(ETH_P_LOCALTALK); insb(ioaddr, skb->data, pkt_len); /* Eat the Data */ if(lp->board==DAYNA) outb(1, ioaddr+DAYNA_INT_CARD); /* Interrupt the card */ spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */ /* Check for bad response length */ if(pkt_len < 0 || pkt_len > MAX_LLAP_SIZE) { printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n", dev->name, pkt_len); dev->stats.tx_errors++; dev_kfree_skb_any(skb); return; } /* Set nodeid and then get out. */ if(rsp_type == LAP_INIT_RSP) { /* Nodeid taken from received packet. */ lp->node_acquire = skb->data[0]; dev_kfree_skb_any(skb); return; } /* One last check to make sure we have a good packet. */ if(rsp_type != LAP_RESPONSE) { printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type); dev->stats.tx_errors++; dev_kfree_skb_any(skb); return; } skb_reset_mac_header(skb); /* Point to entire packet. */ skb_pull(skb,3); skb_reset_transport_header(skb); /* Point to data (Skip header). */ /* Update the counters. */ dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; /* Send packet to a higher place. 
*/ netif_rx(skb); } static void cops_timeout(struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; dev->stats.tx_errors++; if(lp->board==TANGENT) { if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) printk(KERN_WARNING "%s: No TX complete interrupt.\n", dev->name); } printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name); cops_jumpstart(dev); /* Restart the card. */ dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(dev); } /* * Make the card transmit a LocalTalk packet. */ static netdev_tx_t cops_send_packet(struct sk_buff *skb, struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; unsigned long flags; /* * Block a timer-based transmit from overlapping. */ netif_stop_queue(dev); spin_lock_irqsave(&lp->lock, flags); if(lp->board == DAYNA) /* Wait for adapter transmit buffer. */ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0) cpu_relax(); if(lp->board == TANGENT) /* Wait for adapter transmit buffer. */ while((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0) cpu_relax(); /* Output IO length. */ outb(skb->len, ioaddr); if(lp->board == DAYNA) outb(skb->len >> 8, ioaddr); else outb((skb->len >> 8)&0x0FF, ioaddr); /* Output IO code. */ outb(LAP_WRITE, ioaddr); if(lp->board == DAYNA) /* Check the transmit buffer again. */ while((inb(ioaddr+DAYNA_CARD_STATUS)&DAYNA_TX_READY)==0); outsb(ioaddr, skb->data, skb->len); /* Send out the data. */ if(lp->board==DAYNA) /* Dayna requires you kick the card */ outb(1, ioaddr+DAYNA_INT_CARD); spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */ /* Done sending packet, update counters and cleanup. */ dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; dev_kfree_skb (skb); return NETDEV_TX_OK; } /* * Dummy function to keep the Appletalk layer happy. 
*/ static void set_multicast_list(struct net_device *dev) { if(cops_debug >= 3) printk("%s: set_multicast_list executed\n", dev->name); } /* * System ioctls for the COPS LocalTalk card. */ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct cops_local *lp = netdev_priv(dev); struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr; struct atalk_addr *aa = (struct atalk_addr *)&lp->node_addr; switch(cmd) { case SIOCSIFADDR: /* Get and set the nodeid and network # atalkd wants. */ cops_nodeid(dev, sa->sat_addr.s_node); aa->s_net = sa->sat_addr.s_net; aa->s_node = lp->node_acquire; /* Set broardcast address. */ dev->broadcast[0] = 0xFF; /* Set hardware address. */ dev->dev_addr[0] = aa->s_node; dev->addr_len = 1; return 0; case SIOCGIFADDR: sa->sat_addr.s_net = aa->s_net; sa->sat_addr.s_node = aa->s_node; return 0; default: return -EOPNOTSUPP; } } /* * The inverse routine to cops_open(). */ static int cops_close(struct net_device *dev) { struct cops_local *lp = netdev_priv(dev); /* If we were running polled, yank the timer. */ if(lp->board==TANGENT && dev->irq==0) del_timer(&cops_timer); netif_stop_queue(dev); return 0; } #ifdef MODULE static struct net_device *cops_dev; MODULE_LICENSE("GPL"); module_param(io, int, 0); module_param(irq, int, 0); module_param(board_type, int, 0); static int __init cops_module_init(void) { if (io == 0) printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", cardname); cops_dev = cops_probe(-1); if (IS_ERR(cops_dev)) return PTR_ERR(cops_dev); return 0; } static void __exit cops_module_exit(void) { unregister_netdev(cops_dev); cleanup_card(cops_dev); free_netdev(cops_dev); } module_init(cops_module_init); module_exit(cops_module_exit); #endif /* MODULE */
gpl-2.0
NoelMacwan/Kernel-NanHu-11.3.A.1.39
arch/x86/kernel/mca_32.c
4693
12764
/* * Written by Martin Kolinek, February 1996 * * Changes: * * Chris Beauregard July 28th, 1996 * - Fixed up integrated SCSI detection * * Chris Beauregard August 3rd, 1996 * - Made mca_info local * - Made integrated registers accessible through standard function calls * - Added name field * - More sanity checking * * Chris Beauregard August 9th, 1996 * - Rewrote /proc/mca * * Chris Beauregard January 7th, 1997 * - Added basic NMI-processing * - Added more information to mca_info structure * * David Weinehall October 12th, 1998 * - Made a lot of cleaning up in the source * - Added use of save_flags / restore_flags * - Added the 'driver_loaded' flag in MCA_adapter * - Added an alternative implemention of ZP Gu's mca_find_unused_adapter * * David Weinehall March 24th, 1999 * - Fixed the output of 'Driver Installed' in /proc/mca/pos * - Made the Integrated Video & SCSI show up even if they have id 0000 * * Alexander Viro November 9th, 1999 * - Switched to regular procfs methods * * Alfred Arnold & David Weinehall August 23rd, 2000 * - Added support for Planar POS-registers */ #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mca.h> #include <linux/kprobes.h> #include <linux/slab.h> #include <asm/io.h> #include <linux/proc_fs.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/ioport.h> #include <asm/uaccess.h> #include <linux/init.h> static unsigned char which_scsi; int MCA_bus; EXPORT_SYMBOL(MCA_bus); /* * Motherboard register spinlock. Untested on SMP at the moment, but * are there any MCA SMP boxes? 
* * Yes - Alan */ static DEFINE_SPINLOCK(mca_lock); /* Build the status info for the adapter */ static void mca_configure_adapter_status(struct mca_device *mca_dev) { mca_dev->status = MCA_ADAPTER_NONE; mca_dev->pos_id = mca_dev->pos[0] + (mca_dev->pos[1] << 8); if (!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) { /* * id = 0x0000 usually indicates hardware failure, * however, ZP Gu (zpg@castle.net> reports that his 9556 * has 0x0000 as id and everything still works. There * also seem to be an adapter with id = 0x0000; the * NCR Parallel Bus Memory Card. Until this is confirmed, * however, this code will stay. */ mca_dev->status = MCA_ADAPTER_ERROR; return; } else if (mca_dev->pos_id != 0xffff) { /* * 0xffff usually indicates that there's no adapter, * however, some integrated adapters may have 0xffff as * their id and still be valid. Examples are on-board * VGA of the 55sx, the integrated SCSI of the 56 & 57, * and possibly also the 95 ULTIMEDIA. */ mca_dev->status = MCA_ADAPTER_NORMAL; } if ((mca_dev->pos_id == 0xffff || mca_dev->pos_id == 0x0000) && mca_dev->slot >= MCA_MAX_SLOT_NR) { int j; for (j = 2; j < 8; j++) { if (mca_dev->pos[j] != 0xff) { mca_dev->status = MCA_ADAPTER_NORMAL; break; } } } if (!(mca_dev->pos[2] & MCA_ENABLED)) { /* enabled bit is in POS 2 */ mca_dev->status = MCA_ADAPTER_DISABLED; } } /* mca_configure_adapter_status */ /*--------------------------------------------------------------------*/ static struct resource mca_standard_resources[] = { { .start = 0x60, .end = 0x60, .name = "system control port B (MCA)" }, { .start = 0x90, .end = 0x90, .name = "arbitration (MCA)" }, { .start = 0x91, .end = 0x91, .name = "card Select Feedback (MCA)" }, { .start = 0x92, .end = 0x92, .name = "system Control port A (MCA)" }, { .start = 0x94, .end = 0x94, .name = "system board setup (MCA)" }, { .start = 0x96, .end = 0x97, .name = "POS (MCA)" }, { .start = 0x100, .end = 0x107, .name = "POS (MCA)" } }; #define MCA_STANDARD_RESOURCES 
ARRAY_SIZE(mca_standard_resources) /* * mca_read_and_store_pos - read the POS registers into a memory buffer * @pos: a char pointer to 8 bytes, contains the POS register value on * successful return * * Returns 1 if a card actually exists (i.e. the pos isn't * all 0xff) or 0 otherwise */ static int mca_read_and_store_pos(unsigned char *pos) { int j; int found = 0; for (j = 0; j < 8; j++) { pos[j] = inb_p(MCA_POS_REG(j)); if (pos[j] != 0xff) { /* 0xff all across means no device. 0x00 means * something's broken, but a device is * probably there. However, if you get 0x00 * from a motherboard register it won't matter * what we find. For the record, on the * 57SLC, the integrated SCSI adapter has * 0xffff for the adapter ID, but nonzero for * other registers. */ found = 1; } } return found; } static unsigned char mca_pc_read_pos(struct mca_device *mca_dev, int reg) { unsigned char byte; unsigned long flags; if (reg < 0 || reg >= 8) return 0; spin_lock_irqsave(&mca_lock, flags); if (mca_dev->pos_register) { /* Disable adapter setup, enable motherboard setup */ outb_p(0, MCA_ADAPTER_SETUP_REG); outb_p(mca_dev->pos_register, MCA_MOTHERBOARD_SETUP_REG); byte = inb_p(MCA_POS_REG(reg)); outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG); } else { /* Make sure motherboard setup is off */ outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG); /* Read the appropriate register */ outb_p(0x8|(mca_dev->slot & 0xf), MCA_ADAPTER_SETUP_REG); byte = inb_p(MCA_POS_REG(reg)); outb_p(0, MCA_ADAPTER_SETUP_REG); } spin_unlock_irqrestore(&mca_lock, flags); mca_dev->pos[reg] = byte; return byte; } static void mca_pc_write_pos(struct mca_device *mca_dev, int reg, unsigned char byte) { unsigned long flags; if (reg < 0 || reg >= 8) return; spin_lock_irqsave(&mca_lock, flags); /* Make sure motherboard setup is off */ outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG); /* Read in the appropriate register */ outb_p(0x8|(mca_dev->slot&0xf), MCA_ADAPTER_SETUP_REG); outb_p(byte, MCA_POS_REG(reg)); outb_p(0, MCA_ADAPTER_SETUP_REG); 
spin_unlock_irqrestore(&mca_lock, flags); /* Update the global register list, while we have the byte */ mca_dev->pos[reg] = byte; } /* for the primary MCA bus, we have identity transforms */ static int mca_dummy_transform_irq(struct mca_device *mca_dev, int irq) { return irq; } static int mca_dummy_transform_ioport(struct mca_device *mca_dev, int port) { return port; } static void *mca_dummy_transform_memory(struct mca_device *mca_dev, void *mem) { return mem; } static int __init mca_init(void) { unsigned int i, j; struct mca_device *mca_dev; unsigned char pos[8]; short mca_builtin_scsi_ports[] = {0xf7, 0xfd, 0x00}; struct mca_bus *bus; /* * WARNING: Be careful when making changes here. Putting an adapter * and the motherboard simultaneously into setup mode may result in * damage to chips (according to The Indispensable PC Hardware Book * by Hans-Peter Messmer). Also, we disable system interrupts (so * that we are not disturbed in the middle of this). */ /* Make sure the MCA bus is present */ if (mca_system_init()) { printk(KERN_ERR "MCA bus system initialisation failed\n"); return -ENODEV; } if (!MCA_bus) return -ENODEV; printk(KERN_INFO "Micro Channel bus detected.\n"); /* All MCA systems have at least a primary bus */ bus = mca_attach_bus(MCA_PRIMARY_BUS); if (!bus) goto out_nomem; bus->default_dma_mask = 0xffffffffLL; bus->f.mca_write_pos = mca_pc_write_pos; bus->f.mca_read_pos = mca_pc_read_pos; bus->f.mca_transform_irq = mca_dummy_transform_irq; bus->f.mca_transform_ioport = mca_dummy_transform_ioport; bus->f.mca_transform_memory = mca_dummy_transform_memory; /* get the motherboard device */ mca_dev = kzalloc(sizeof(struct mca_device), GFP_KERNEL); if (unlikely(!mca_dev)) goto out_nomem; /* * We do not expect many MCA interrupts during initialization, * but let us be safe: */ spin_lock_irq(&mca_lock); /* Make sure adapter setup is off */ outb_p(0, MCA_ADAPTER_SETUP_REG); /* Read motherboard POS registers */ mca_dev->pos_register = 0x7f; 
outb_p(mca_dev->pos_register, MCA_MOTHERBOARD_SETUP_REG); mca_dev->name[0] = 0; mca_read_and_store_pos(mca_dev->pos); mca_configure_adapter_status(mca_dev); /* fake POS and slot for a motherboard */ mca_dev->pos_id = MCA_MOTHERBOARD_POS; mca_dev->slot = MCA_MOTHERBOARD; mca_register_device(MCA_PRIMARY_BUS, mca_dev); mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); if (unlikely(!mca_dev)) goto out_unlock_nomem; /* Put motherboard into video setup mode, read integrated video * POS registers, and turn motherboard setup off. */ mca_dev->pos_register = 0xdf; outb_p(mca_dev->pos_register, MCA_MOTHERBOARD_SETUP_REG); mca_dev->name[0] = 0; mca_read_and_store_pos(mca_dev->pos); mca_configure_adapter_status(mca_dev); /* fake POS and slot for the integrated video */ mca_dev->pos_id = MCA_INTEGVIDEO_POS; mca_dev->slot = MCA_INTEGVIDEO; mca_register_device(MCA_PRIMARY_BUS, mca_dev); /* * Put motherboard into scsi setup mode, read integrated scsi * POS registers, and turn motherboard setup off. * * It seems there are two possible SCSI registers. Martin says that * for the 56,57, 0xf7 is the one, but fails on the 76. * Alfredo (apena@vnet.ibm.com) says * 0xfd works on his machine. We'll try both of them. I figure it's * a good bet that only one could be valid at a time. This could * screw up though if one is used for something else on the other * machine. 
*/ for (i = 0; (which_scsi = mca_builtin_scsi_ports[i]) != 0; i++) { outb_p(which_scsi, MCA_MOTHERBOARD_SETUP_REG); if (mca_read_and_store_pos(pos)) break; } if (which_scsi) { /* found a scsi card */ mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); if (unlikely(!mca_dev)) goto out_unlock_nomem; for (j = 0; j < 8; j++) mca_dev->pos[j] = pos[j]; mca_configure_adapter_status(mca_dev); /* fake POS and slot for integrated SCSI controller */ mca_dev->pos_id = MCA_INTEGSCSI_POS; mca_dev->slot = MCA_INTEGSCSI; mca_dev->pos_register = which_scsi; mca_register_device(MCA_PRIMARY_BUS, mca_dev); } /* Turn off motherboard setup */ outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG); /* * Now loop over MCA slots: put each adapter into setup mode, and * read its POS registers. Then put adapter setup off. */ for (i = 0; i < MCA_MAX_SLOT_NR; i++) { outb_p(0x8|(i&0xf), MCA_ADAPTER_SETUP_REG); if (!mca_read_and_store_pos(pos)) continue; mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC); if (unlikely(!mca_dev)) goto out_unlock_nomem; for (j = 0; j < 8; j++) mca_dev->pos[j] = pos[j]; mca_dev->driver_loaded = 0; mca_dev->slot = i; mca_dev->pos_register = 0; mca_configure_adapter_status(mca_dev); mca_register_device(MCA_PRIMARY_BUS, mca_dev); } outb_p(0, MCA_ADAPTER_SETUP_REG); /* Enable interrupts and return memory start */ spin_unlock_irq(&mca_lock); for (i = 0; i < MCA_STANDARD_RESOURCES; i++) request_resource(&ioport_resource, mca_standard_resources + i); mca_do_proc_init(); return 0; out_unlock_nomem: spin_unlock_irq(&mca_lock); out_nomem: printk(KERN_EMERG "Failed memory allocation in MCA setup!\n"); return -ENOMEM; } subsys_initcall(mca_init); /*--------------------------------------------------------------------*/ static __kprobes void mca_handle_nmi_device(struct mca_device *mca_dev, int check_flag) { int slot = mca_dev->slot; if (slot == MCA_INTEGSCSI) { printk(KERN_CRIT "NMI: caused by MCA integrated SCSI adapter (%s)\n", mca_dev->name); } else if (slot == MCA_INTEGVIDEO) 
{ printk(KERN_CRIT "NMI: caused by MCA integrated video adapter (%s)\n", mca_dev->name); } else if (slot == MCA_MOTHERBOARD) { printk(KERN_CRIT "NMI: caused by motherboard (%s)\n", mca_dev->name); } /* More info available in POS 6 and 7? */ if (check_flag) { unsigned char pos6, pos7; pos6 = mca_device_read_pos(mca_dev, 6); pos7 = mca_device_read_pos(mca_dev, 7); printk(KERN_CRIT "NMI: POS 6 = 0x%x, POS 7 = 0x%x\n", pos6, pos7); } } /* mca_handle_nmi_slot */ /*--------------------------------------------------------------------*/ static int __kprobes mca_handle_nmi_callback(struct device *dev, void *data) { struct mca_device *mca_dev = to_mca_device(dev); unsigned char pos5; pos5 = mca_device_read_pos(mca_dev, 5); if (!(pos5 & 0x80)) { /* * Bit 7 of POS 5 is reset when this adapter has a hardware * error. Bit 7 it reset if there's error information * available in POS 6 and 7. */ mca_handle_nmi_device(mca_dev, !(pos5 & 0x40)); return 1; } return 0; } void __kprobes mca_handle_nmi(void) { /* * First try - scan the various adapters and see if a specific * adapter was responsible for the error. */ bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback); }
gpl-2.0
cooks8/android_kernel_samsung_jf
arch/mips/lantiq/xway/prom-xway.c
4693
1148
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2010 John Crispin <blogic@openwrt.org> */ #include <linux/export.h> #include <linux/clk.h> #include <asm/bootinfo.h> #include <asm/time.h> #include <lantiq_soc.h> #include "../prom.h" #define SOC_DANUBE "Danube" #define SOC_TWINPASS "Twinpass" #define SOC_AR9 "AR9" #define PART_SHIFT 12 #define PART_MASK 0x0FFFFFFF #define REV_SHIFT 28 #define REV_MASK 0xF0000000 void __init ltq_soc_detect(struct ltq_soc_info *i) { i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; switch (i->partnum) { case SOC_ID_DANUBE1: case SOC_ID_DANUBE2: i->name = SOC_DANUBE; i->type = SOC_TYPE_DANUBE; break; case SOC_ID_TWINPASS: i->name = SOC_TWINPASS; i->type = SOC_TYPE_DANUBE; break; case SOC_ID_ARX188: case SOC_ID_ARX168: case SOC_ID_ARX182: i->name = SOC_AR9; i->type = SOC_TYPE_AR9; break; default: unreachable(); break; } }
gpl-2.0
XXMrHyde/android_kernel_motorola_msm8610
arch/sh/kernel/cpu/shmobile/cpuidle.c
4693
3167
/* * arch/sh/kernel/cpu/shmobile/cpuidle.c * * Cpuidle support code for SuperH Mobile * * Copyright (C) 2009 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/suspend.h> #include <linux/cpuidle.h> #include <linux/export.h> #include <asm/suspend.h> #include <asm/uaccess.h> static unsigned long cpuidle_mode[] = { SUSP_SH_SLEEP, /* regular sleep mode */ SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */ SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */ }; static int cpuidle_sleep_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { unsigned long allowed_mode = SUSP_SH_SLEEP; int requested_state = index; int allowed_state; int k; /* convert allowed mode to allowed state */ for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--) if (cpuidle_mode[k] == allowed_mode) break; allowed_state = k; /* take the following into account for sleep mode selection: * - allowed_state: best mode allowed by hardware (clock deps) * - requested_state: best mode allowed by software (latencies) */ k = min_t(int, allowed_state, requested_state); sh_mobile_call_standby(cpuidle_mode[k]); return k; } static struct cpuidle_device cpuidle_dev; static struct cpuidle_driver cpuidle_driver = { .name = "sh_idle", .owner = THIS_MODULE, .en_core_tk_irqen = 1, }; void sh_mobile_setup_cpuidle(void) { struct cpuidle_device *dev = &cpuidle_dev; struct cpuidle_driver *drv = &cpuidle_driver; struct cpuidle_state *state; int i; for (i = 0; i < CPUIDLE_STATE_MAX; i++) { drv->states[i].name[0] = '\0'; drv->states[i].desc[0] = '\0'; } i = CPUIDLE_DRIVER_STATE_START; state = &drv->states[i++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); state->exit_latency = 1; 
state->target_residency = 1 * 2; state->power_usage = 3; state->flags = 0; state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; drv->safe_state_index = i-1; if (sh_mobile_sleep_supported & SUSP_SH_SF) { state = &drv->states[i++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN); state->exit_latency = 100; state->target_residency = 1 * 2; state->power_usage = 1; state->flags = 0; state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; } if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { state = &drv->states[i++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN); state->exit_latency = 2300; state->target_residency = 1 * 2; state->power_usage = 1; state->flags = 0; state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; } drv->state_count = i; dev->state_count = i; cpuidle_register_driver(&cpuidle_driver); cpuidle_register_device(dev); }
gpl-2.0
imoseyon/leanKernel-d2vzw
arch/mips/dec/wbflush.c
4693
2110
/* * Setup the right wbflush routine for the different DECstations. * * Created with information from: * DECstation 3100 Desktop Workstation Functional Specification * DECstation 5000/200 KN02 System Module Functional Specification * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998 Harald Koerfgen * Copyright (C) 2002 Maciej W. Rozycki */ #include <linux/init.h> #include <asm/bootinfo.h> #include <asm/wbflush.h> #include <asm/barrier.h> static void wbflush_kn01(void); static void wbflush_kn210(void); static void wbflush_mips(void); void (*__wbflush) (void); void __init wbflush_setup(void) { switch (mips_machtype) { case MACH_DS23100: case MACH_DS5000_200: /* DS5000 3max */ __wbflush = wbflush_kn01; break; case MACH_DS5100: /* DS5100 MIPSMATE */ __wbflush = wbflush_kn210; break; case MACH_DS5000_1XX: /* DS5000/100 3min */ case MACH_DS5000_XX: /* Personal DS5000/2x */ case MACH_DS5000_2X0: /* DS5000/240 3max+ */ case MACH_DS5900: /* DS5900 bigmax */ default: __wbflush = wbflush_mips; break; } } /* * For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions * as part of Coprocessor 0. */ static void wbflush_kn01(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "1:\tbc0f\t1b\n\t" "nop\n\t" ".set\tpop"); } /* * For the DS5100 the writeback buffer seems to be a part of Coprocessor 3. * But CP3 has to enabled first. */ static void wbflush_kn210(void) { asm(".set\tpush\n\t" ".set\tnoreorder\n\t" "mfc0\t$2,$12\n\t" "lui\t$3,0x8000\n\t" "or\t$3,$2,$3\n\t" "mtc0\t$3,$12\n\t" "nop\n" "1:\tbc3f\t1b\n\t" "nop\n\t" "mtc0\t$2,$12\n\t" "nop\n\t" ".set\tpop" : : : "$2", "$3"); } /* * I/O ASIC systems use a standard writeback buffer that gets flushed * upon an uncached read. 
*/ static void wbflush_mips(void) { __fast_iob(); } #include <linux/module.h> EXPORT_SYMBOL(__wbflush);
gpl-2.0
SlimRoms/kernel_lge_hammerhead
arch/sh/kernel/cpu/shmobile/cpuidle.c
4693
3167
/* * arch/sh/kernel/cpu/shmobile/cpuidle.c * * Cpuidle support code for SuperH Mobile * * Copyright (C) 2009 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/suspend.h> #include <linux/cpuidle.h> #include <linux/export.h> #include <asm/suspend.h> #include <asm/uaccess.h> static unsigned long cpuidle_mode[] = { SUSP_SH_SLEEP, /* regular sleep mode */ SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */ SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */ }; static int cpuidle_sleep_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { unsigned long allowed_mode = SUSP_SH_SLEEP; int requested_state = index; int allowed_state; int k; /* convert allowed mode to allowed state */ for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--) if (cpuidle_mode[k] == allowed_mode) break; allowed_state = k; /* take the following into account for sleep mode selection: * - allowed_state: best mode allowed by hardware (clock deps) * - requested_state: best mode allowed by software (latencies) */ k = min_t(int, allowed_state, requested_state); sh_mobile_call_standby(cpuidle_mode[k]); return k; } static struct cpuidle_device cpuidle_dev; static struct cpuidle_driver cpuidle_driver = { .name = "sh_idle", .owner = THIS_MODULE, .en_core_tk_irqen = 1, }; void sh_mobile_setup_cpuidle(void) { struct cpuidle_device *dev = &cpuidle_dev; struct cpuidle_driver *drv = &cpuidle_driver; struct cpuidle_state *state; int i; for (i = 0; i < CPUIDLE_STATE_MAX; i++) { drv->states[i].name[0] = '\0'; drv->states[i].desc[0] = '\0'; } i = CPUIDLE_DRIVER_STATE_START; state = &drv->states[i++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); state->exit_latency = 1; 
state->target_residency = 1 * 2; state->power_usage = 3; state->flags = 0; state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; drv->safe_state_index = i-1; if (sh_mobile_sleep_supported & SUSP_SH_SF) { state = &drv->states[i++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN); state->exit_latency = 100; state->target_residency = 1 * 2; state->power_usage = 1; state->flags = 0; state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; } if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { state = &drv->states[i++]; snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN); state->exit_latency = 2300; state->target_residency = 1 * 2; state->power_usage = 1; state->flags = 0; state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; } drv->state_count = i; dev->state_count = i; cpuidle_register_driver(&cpuidle_driver); cpuidle_register_device(dev); }
gpl-2.0
slz/arco-samsung-kernel-msm7x30
arch/mips/lantiq/xway/prom-xway.c
4693
1148
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2010 John Crispin <blogic@openwrt.org> */ #include <linux/export.h> #include <linux/clk.h> #include <asm/bootinfo.h> #include <asm/time.h> #include <lantiq_soc.h> #include "../prom.h" #define SOC_DANUBE "Danube" #define SOC_TWINPASS "Twinpass" #define SOC_AR9 "AR9" #define PART_SHIFT 12 #define PART_MASK 0x0FFFFFFF #define REV_SHIFT 28 #define REV_MASK 0xF0000000 void __init ltq_soc_detect(struct ltq_soc_info *i) { i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; switch (i->partnum) { case SOC_ID_DANUBE1: case SOC_ID_DANUBE2: i->name = SOC_DANUBE; i->type = SOC_TYPE_DANUBE; break; case SOC_ID_TWINPASS: i->name = SOC_TWINPASS; i->type = SOC_TYPE_DANUBE; break; case SOC_ID_ARX188: case SOC_ID_ARX168: case SOC_ID_ARX182: i->name = SOC_AR9; i->type = SOC_TYPE_AR9; break; default: unreachable(); break; } }
gpl-2.0
Split-Screen/android_kernel_samsung_msm7x30-common
drivers/staging/omapdrm/omap_encoder.c
4949
4581
/* * drivers/staging/omapdrm/omap_encoder.c * * Copyright (C) 2011 Texas Instruments * Author: Rob Clark <rob@ti.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include "omap_drv.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" /* * encoder funcs */ #define to_omap_encoder(x) container_of(x, struct omap_encoder, base) struct omap_encoder { struct drm_encoder base; struct omap_overlay_manager *mgr; }; static void omap_encoder_destroy(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); DBG("%s", omap_encoder->mgr->name); drm_encoder_cleanup(encoder); kfree(omap_encoder); } static void omap_encoder_dpms(struct drm_encoder *encoder, int mode) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); DBG("%s: %d", omap_encoder->mgr->name, mode); } static bool omap_encoder_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); DBG("%s", omap_encoder->mgr->name); return true; } static void omap_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct drm_device *dev = encoder->dev; struct omap_drm_private *priv = dev->dev_private; int i; mode = adjusted_mode; DBG("%s: set mode: %dx%d", omap_encoder->mgr->name, mode->hdisplay, mode->vdisplay); for (i = 0; 
i < priv->num_connectors; i++) { struct drm_connector *connector = priv->connectors[i]; if (connector->encoder == encoder) { omap_connector_mode_set(connector, mode); } } } static void omap_encoder_prepare(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; DBG("%s", omap_encoder->mgr->name); encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); } static void omap_encoder_commit(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; DBG("%s", omap_encoder->mgr->name); omap_encoder->mgr->apply(omap_encoder->mgr); encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); } static const struct drm_encoder_funcs omap_encoder_funcs = { .destroy = omap_encoder_destroy, }; static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { .dpms = omap_encoder_dpms, .mode_fixup = omap_encoder_mode_fixup, .mode_set = omap_encoder_mode_set, .prepare = omap_encoder_prepare, .commit = omap_encoder_commit, }; struct omap_overlay_manager *omap_encoder_get_manager( struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); return omap_encoder->mgr; } /* initialize encoder */ struct drm_encoder *omap_encoder_init(struct drm_device *dev, struct omap_overlay_manager *mgr) { struct drm_encoder *encoder = NULL; struct omap_encoder *omap_encoder; struct omap_overlay_manager_info info; int ret; DBG("%s", mgr->name); omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL); if (!omap_encoder) { dev_err(dev->dev, "could not allocate encoder\n"); goto fail; } omap_encoder->mgr = mgr; encoder = &omap_encoder->base; drm_encoder_init(dev, encoder, &omap_encoder_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs); mgr->get_manager_info(mgr, &info); /* TODO: fix hard-coded setup.. 
*/ info.default_color = 0x00000000; info.trans_key = 0x00000000; info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST; info.trans_enabled = false; ret = mgr->set_manager_info(mgr, &info); if (ret) { dev_err(dev->dev, "could not set manager info\n"); goto fail; } ret = mgr->apply(mgr); if (ret) { dev_err(dev->dev, "could not apply\n"); goto fail; } return encoder; fail: if (encoder) { omap_encoder_destroy(encoder); } return NULL; }
gpl-2.0
VRToxin-AOSP/android_kernel_moto_shamu
drivers/media/dvb-frontends/dibx000_common.c
9557
13249
#include <linux/i2c.h> #include <linux/mutex.h> #include <linux/module.h> #include "dibx000_common.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); #define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); printk("\n"); } } while (0) static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val) { int ret; if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return -EINVAL; } mst->i2c_write_buffer[0] = (reg >> 8) & 0xff; mst->i2c_write_buffer[1] = reg & 0xff; mst->i2c_write_buffer[2] = (val >> 8) & 0xff; mst->i2c_write_buffer[3] = val & 0xff; memset(mst->msg, 0, sizeof(struct i2c_msg)); mst->msg[0].addr = mst->i2c_addr; mst->msg[0].flags = 0; mst->msg[0].buf = mst->i2c_write_buffer; mst->msg[0].len = 4; ret = i2c_transfer(mst->i2c_adap, mst->msg, 1) != 1 ? -EREMOTEIO : 0; mutex_unlock(&mst->i2c_buffer_lock); return ret; } static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg) { u16 ret; if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return 0; } mst->i2c_write_buffer[0] = reg >> 8; mst->i2c_write_buffer[1] = reg & 0xff; memset(mst->msg, 0, 2 * sizeof(struct i2c_msg)); mst->msg[0].addr = mst->i2c_addr; mst->msg[0].flags = 0; mst->msg[0].buf = mst->i2c_write_buffer; mst->msg[0].len = 2; mst->msg[1].addr = mst->i2c_addr; mst->msg[1].flags = I2C_M_RD; mst->msg[1].buf = mst->i2c_read_buffer; mst->msg[1].len = 2; if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2) dprintk("i2c read error on %d", reg); ret = (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1]; mutex_unlock(&mst->i2c_buffer_lock); return ret; } static int dibx000_is_i2c_done(struct dibx000_i2c_master *mst) { int i = 100; u16 status; while (((status = dibx000_read_word(mst, mst->base_reg + 2)) & 0x0100) == 0 && --i > 0) ; /* i2c timed out */ if (i == 0) return -EREMOTEIO; /* no acknowledge */ 
if ((status & 0x0080) == 0) return -EREMOTEIO; return 0; } static int dibx000_master_i2c_write(struct dibx000_i2c_master *mst, struct i2c_msg *msg, u8 stop) { u16 data; u16 da; u16 i; u16 txlen = msg->len, len; const u8 *b = msg->buf; while (txlen) { dibx000_read_word(mst, mst->base_reg + 2); len = txlen > 8 ? 8 : txlen; for (i = 0; i < len; i += 2) { data = *b++ << 8; if (i+1 < len) data |= *b++; dibx000_write_word(mst, mst->base_reg, data); } da = (((u8) (msg->addr)) << 9) | (1 << 8) | (1 << 7) | (0 << 6) | (0 << 5) | ((len & 0x7) << 2) | (0 << 1) | (0 << 0); if (txlen == msg->len) da |= 1 << 5; /* start */ if (txlen-len == 0 && stop) da |= 1 << 6; /* stop */ dibx000_write_word(mst, mst->base_reg+1, da); if (dibx000_is_i2c_done(mst) != 0) return -EREMOTEIO; txlen -= len; } return 0; } static int dibx000_master_i2c_read(struct dibx000_i2c_master *mst, struct i2c_msg *msg) { u16 da; u8 *b = msg->buf; u16 rxlen = msg->len, len; while (rxlen) { len = rxlen > 8 ? 8 : rxlen; da = (((u8) (msg->addr)) << 9) | (1 << 8) | (1 << 7) | (0 << 6) | (0 << 5) | ((len & 0x7) << 2) | (1 << 1) | (0 << 0); if (rxlen == msg->len) da |= 1 << 5; /* start */ if (rxlen-len == 0) da |= 1 << 6; /* stop */ dibx000_write_word(mst, mst->base_reg+1, da); if (dibx000_is_i2c_done(mst) != 0) return -EREMOTEIO; rxlen -= len; while (len) { da = dibx000_read_word(mst, mst->base_reg); *b++ = (da >> 8) & 0xff; len--; if (len >= 1) { *b++ = da & 0xff; len--; } } } return 0; } int dibx000_i2c_set_speed(struct i2c_adapter *i2c_adap, u16 speed) { struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap); if (mst->device_rev < DIB7000MC && speed < 235) speed = 235; return dibx000_write_word(mst, mst->base_reg + 3, (u16)(60000 / speed)); } EXPORT_SYMBOL(dibx000_i2c_set_speed); static u32 dibx000_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst, enum dibx000_i2c_interface intf) { if (mst->device_rev > DIB3000MC && 
mst->selected_interface != intf) { dprintk("selecting interface: %d", intf); mst->selected_interface = intf; return dibx000_write_word(mst, mst->base_reg + 4, intf); } return 0; } static int dibx000_i2c_master_xfer_gpio12(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap); int msg_index; int ret = 0; dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_1_2); for (msg_index = 0; msg_index < num; msg_index++) { if (msg[msg_index].flags & I2C_M_RD) { ret = dibx000_master_i2c_read(mst, &msg[msg_index]); if (ret != 0) return 0; } else { ret = dibx000_master_i2c_write(mst, &msg[msg_index], 1); if (ret != 0) return 0; } } return num; } static int dibx000_i2c_master_xfer_gpio34(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap); int msg_index; int ret = 0; dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_3_4); for (msg_index = 0; msg_index < num; msg_index++) { if (msg[msg_index].flags & I2C_M_RD) { ret = dibx000_master_i2c_read(mst, &msg[msg_index]); if (ret != 0) return 0; } else { ret = dibx000_master_i2c_write(mst, &msg[msg_index], 1); if (ret != 0) return 0; } } return num; } static struct i2c_algorithm dibx000_i2c_master_gpio12_xfer_algo = { .master_xfer = dibx000_i2c_master_xfer_gpio12, .functionality = dibx000_i2c_func, }; static struct i2c_algorithm dibx000_i2c_master_gpio34_xfer_algo = { .master_xfer = dibx000_i2c_master_xfer_gpio34, .functionality = dibx000_i2c_func, }; static int dibx000_i2c_gate_ctrl(struct dibx000_i2c_master *mst, u8 tx[4], u8 addr, int onoff) { u16 val; if (onoff) val = addr << 8; // bit 7 = use master or not, if 0, the gate is open else val = 1 << 7; if (mst->device_rev > DIB7000) val <<= 1; tx[0] = (((mst->base_reg + 1) >> 8) & 0xff); tx[1] = ((mst->base_reg + 1) & 0xff); tx[2] = val >> 8; tx[3] = val & 0xff; return 0; } static int dibx000_i2c_gated_gpio67_xfer(struct 
i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap); int ret; if (num > 32) { dprintk("%s: too much I2C message to be transmitted (%i).\ Maximum is 32", __func__, num); return -ENOMEM; } dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7); if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return -EINVAL; } memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num)); /* open the gate */ dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1); mst->msg[0].addr = mst->i2c_addr; mst->msg[0].buf = &mst->i2c_write_buffer[0]; mst->msg[0].len = 4; memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num); /* close the gate */ dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[4], 0, 0); mst->msg[num + 1].addr = mst->i2c_addr; mst->msg[num + 1].buf = &mst->i2c_write_buffer[4]; mst->msg[num + 1].len = 4; ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? 
num : -EIO); mutex_unlock(&mst->i2c_buffer_lock); return ret; } static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = { .master_xfer = dibx000_i2c_gated_gpio67_xfer, .functionality = dibx000_i2c_func, }; static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap); int ret; if (num > 32) { dprintk("%s: too much I2C message to be transmitted (%i).\ Maximum is 32", __func__, num); return -ENOMEM; } dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER); if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return -EINVAL; } memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num)); /* open the gate */ dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[0], msg[0].addr, 1); mst->msg[0].addr = mst->i2c_addr; mst->msg[0].buf = &mst->i2c_write_buffer[0]; mst->msg[0].len = 4; memcpy(&mst->msg[1], msg, sizeof(struct i2c_msg) * num); /* close the gate */ dibx000_i2c_gate_ctrl(mst, &mst->i2c_write_buffer[4], 0, 0); mst->msg[num + 1].addr = mst->i2c_addr; mst->msg[num + 1].buf = &mst->i2c_write_buffer[4]; mst->msg[num + 1].len = 4; ret = (i2c_transfer(mst->i2c_adap, mst->msg, 2 + num) == 2 + num ? 
num : -EIO); mutex_unlock(&mst->i2c_buffer_lock); return ret; } static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = { .master_xfer = dibx000_i2c_gated_tuner_xfer, .functionality = dibx000_i2c_func, }; struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst, enum dibx000_i2c_interface intf, int gating) { struct i2c_adapter *i2c = NULL; switch (intf) { case DIBX000_I2C_INTERFACE_TUNER: if (gating) i2c = &mst->gated_tuner_i2c_adap; break; case DIBX000_I2C_INTERFACE_GPIO_1_2: if (!gating) i2c = &mst->master_i2c_adap_gpio12; break; case DIBX000_I2C_INTERFACE_GPIO_3_4: if (!gating) i2c = &mst->master_i2c_adap_gpio34; break; case DIBX000_I2C_INTERFACE_GPIO_6_7: if (gating) i2c = &mst->master_i2c_adap_gpio67; break; default: printk(KERN_ERR "DiBX000: incorrect I2C interface selected\n"); break; } return i2c; } EXPORT_SYMBOL(dibx000_get_i2c_adapter); void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst) { /* initialize the i2c-master by closing the gate */ u8 tx[4]; struct i2c_msg m = {.addr = mst->i2c_addr,.buf = tx,.len = 4 }; dibx000_i2c_gate_ctrl(mst, tx, 0, 0); i2c_transfer(mst->i2c_adap, &m, 1); mst->selected_interface = 0xff; // the first time force a select of the I2C dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER); } EXPORT_SYMBOL(dibx000_reset_i2c_master); static int i2c_adapter_init(struct i2c_adapter *i2c_adap, struct i2c_algorithm *algo, const char *name, struct dibx000_i2c_master *mst) { strncpy(i2c_adap->name, name, sizeof(i2c_adap->name)); i2c_adap->algo = algo; i2c_adap->algo_data = NULL; i2c_set_adapdata(i2c_adap, mst); if (i2c_add_adapter(i2c_adap) < 0) return -ENODEV; return 0; } int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev, struct i2c_adapter *i2c_adap, u8 i2c_addr) { int ret; mutex_init(&mst->i2c_buffer_lock); if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return -EINVAL; } memset(mst->msg, 0, sizeof(struct i2c_msg)); 
mst->msg[0].addr = i2c_addr >> 1; mst->msg[0].flags = 0; mst->msg[0].buf = mst->i2c_write_buffer; mst->msg[0].len = 4; mst->device_rev = device_rev; mst->i2c_adap = i2c_adap; mst->i2c_addr = i2c_addr >> 1; if (device_rev == DIB7000P || device_rev == DIB8000) mst->base_reg = 1024; else mst->base_reg = 768; mst->gated_tuner_i2c_adap.dev.parent = mst->i2c_adap->dev.parent; if (i2c_adapter_init (&mst->gated_tuner_i2c_adap, &dibx000_i2c_gated_tuner_algo, "DiBX000 tuner I2C bus", mst) != 0) printk(KERN_ERR "DiBX000: could not initialize the tuner i2c_adapter\n"); mst->master_i2c_adap_gpio12.dev.parent = mst->i2c_adap->dev.parent; if (i2c_adapter_init (&mst->master_i2c_adap_gpio12, &dibx000_i2c_master_gpio12_xfer_algo, "DiBX000 master GPIO12 I2C bus", mst) != 0) printk(KERN_ERR "DiBX000: could not initialize the master i2c_adapter\n"); mst->master_i2c_adap_gpio34.dev.parent = mst->i2c_adap->dev.parent; if (i2c_adapter_init (&mst->master_i2c_adap_gpio34, &dibx000_i2c_master_gpio34_xfer_algo, "DiBX000 master GPIO34 I2C bus", mst) != 0) printk(KERN_ERR "DiBX000: could not initialize the master i2c_adapter\n"); mst->master_i2c_adap_gpio67.dev.parent = mst->i2c_adap->dev.parent; if (i2c_adapter_init (&mst->master_i2c_adap_gpio67, &dibx000_i2c_gated_gpio67_algo, "DiBX000 master GPIO67 I2C bus", mst) != 0) printk(KERN_ERR "DiBX000: could not initialize the master i2c_adapter\n"); /* initialize the i2c-master by closing the gate */ dibx000_i2c_gate_ctrl(mst, mst->i2c_write_buffer, 0, 0); ret = (i2c_transfer(i2c_adap, mst->msg, 1) == 1); mutex_unlock(&mst->i2c_buffer_lock); return ret; } EXPORT_SYMBOL(dibx000_init_i2c_master); void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst) { i2c_del_adapter(&mst->gated_tuner_i2c_adap); i2c_del_adapter(&mst->master_i2c_adap_gpio12); i2c_del_adapter(&mst->master_i2c_adap_gpio34); i2c_del_adapter(&mst->master_i2c_adap_gpio67); } EXPORT_SYMBOL(dibx000_exit_i2c_master); u32 systime(void) { struct timespec t; t = current_kernel_time(); 
return (t.tv_sec * 10000) + (t.tv_nsec / 100000); } EXPORT_SYMBOL(systime); MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_DESCRIPTION("Common function the DiBcom demodulator family"); MODULE_LICENSE("GPL");
gpl-2.0
rock12/ALPS.L0.MT6580.SMT.DEV.P1.4_K80_KERNEL
drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
10581
15773
/* ZD1211 USB-WLAN driver for Linux
 *
 * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
 * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>

#include "zd_rf.h"
#include "zd_usb.h"
#include "zd_chip.h"

/* This RF programming code is based upon the code found in v2.16.0.0 of the
 * ZyDAS vendor driver. Unlike other RF's, Ubec publish full technical specs
 * for this RF on their website, so we're able to understand more than
 * usual as to what is going on. Thumbs up for Ubec for doing that. */

/* The 3-wire serial interface provides access to 8 write-only registers.
 * The data format is a 4 bit register address followed by a 20 bit value. */
#define UW2453_REGWRITE(reg, val) ((((reg) & 0xf) << 20) | ((val) & 0xfffff))

/* For channel tuning, we have to configure registers 1 (synthesizer), 2 (synth
 * fractional divide ratio) and 3 (VCO config).
 *
 * We configure the RF to produce an interrupt when the PLL is locked onto
 * the configured frequency. During initialization, we run through a variety
 * of different VCO configurations on channel 1 until we detect a PLL lock.
 * When this happens, we remember which VCO configuration produced the lock
 * and use it later. Actually, we use the configuration *after* the one that
 * produced the lock, which seems odd, but it works.
 *
 * If we do not see a PLL lock on any standard VCO config, we fall back on an
 * autocal configuration, which has a fixed (as opposed to per-channel) VCO
 * config and different synth values from the standard set (divide ratio
 * is still shared with the standard set). */

/* The per-channel synth values for all standard VCO configurations. These get
 * written to register 1. */
static const u8 uw2453_std_synth[] = {
	RF_CHANNEL( 1) = 0x47,
	RF_CHANNEL( 2) = 0x47,
	RF_CHANNEL( 3) = 0x67,
	RF_CHANNEL( 4) = 0x67,
	RF_CHANNEL( 5) = 0x67,
	RF_CHANNEL( 6) = 0x67,
	RF_CHANNEL( 7) = 0x57,
	RF_CHANNEL( 8) = 0x57,
	RF_CHANNEL( 9) = 0x57,
	RF_CHANNEL(10) = 0x57,
	RF_CHANNEL(11) = 0x77,
	RF_CHANNEL(12) = 0x77,
	RF_CHANNEL(13) = 0x77,
	RF_CHANNEL(14) = 0x4f,
};

/* This table stores the synthesizer fractional divide ratio for *all* VCO
 * configurations (both standard and autocal). These get written to register 2.
 */
static const u16 uw2453_synth_divide[] = {
	RF_CHANNEL( 1) = 0x999,
	RF_CHANNEL( 2) = 0x99b,
	RF_CHANNEL( 3) = 0x998,
	RF_CHANNEL( 4) = 0x99a,
	RF_CHANNEL( 5) = 0x999,
	RF_CHANNEL( 6) = 0x99b,
	RF_CHANNEL( 7) = 0x998,
	RF_CHANNEL( 8) = 0x99a,
	RF_CHANNEL( 9) = 0x999,
	RF_CHANNEL(10) = 0x99b,
	RF_CHANNEL(11) = 0x998,
	RF_CHANNEL(12) = 0x99a,
	RF_CHANNEL(13) = 0x999,
	RF_CHANNEL(14) = 0xccc,
};

/* Here is the data for all the standard VCO configurations. We shrink our
 * table a little by observing that both channels in a consecutive pair share
 * the same value. We also observe that the high 4 bits ([0:3] in the specs)
 * are all 'Reserved' and are always set to 0x4 - we chop them off in the data
 * below. */
#define CHAN_TO_PAIRIDX(a) ((a - 1) / 2)
#define RF_CHANPAIR(a,b) [CHAN_TO_PAIRIDX(a)]
static const u16 uw2453_std_vco_cfg[][7] = {
	{ /* table 1 */
		RF_CHANPAIR( 1,  2) = 0x664d,
		RF_CHANPAIR( 3,  4) = 0x604d,
		RF_CHANPAIR( 5,  6) = 0x6675,
		RF_CHANPAIR( 7,  8) = 0x6475,
		RF_CHANPAIR( 9, 10) = 0x6655,
		RF_CHANPAIR(11, 12) = 0x6455,
		RF_CHANPAIR(13, 14) = 0x6665,
	},
	{ /* table 2 */
		RF_CHANPAIR( 1,  2) = 0x666d,
		RF_CHANPAIR( 3,  4) = 0x606d,
		RF_CHANPAIR( 5,  6) = 0x664d,
		RF_CHANPAIR( 7,  8) = 0x644d,
		RF_CHANPAIR( 9, 10) = 0x6675,
		RF_CHANPAIR(11, 12) = 0x6475,
		RF_CHANPAIR(13, 14) = 0x6655,
	},
	{ /* table 3 */
		RF_CHANPAIR( 1,  2) = 0x665d,
		RF_CHANPAIR( 3,  4) = 0x605d,
		RF_CHANPAIR( 5,  6) = 0x666d,
		RF_CHANPAIR( 7,  8) = 0x646d,
		RF_CHANPAIR( 9, 10) = 0x664d,
		RF_CHANPAIR(11, 12) = 0x644d,
		RF_CHANPAIR(13, 14) = 0x6675,
	},
	{ /* table 4 */
		RF_CHANPAIR( 1,  2) = 0x667d,
		RF_CHANPAIR( 3,  4) = 0x607d,
		RF_CHANPAIR( 5,  6) = 0x665d,
		RF_CHANPAIR( 7,  8) = 0x645d,
		RF_CHANPAIR( 9, 10) = 0x666d,
		RF_CHANPAIR(11, 12) = 0x646d,
		RF_CHANPAIR(13, 14) = 0x664d,
	},
	{ /* table 5 */
		RF_CHANPAIR( 1,  2) = 0x6643,
		RF_CHANPAIR( 3,  4) = 0x6043,
		RF_CHANPAIR( 5,  6) = 0x667d,
		RF_CHANPAIR( 7,  8) = 0x647d,
		RF_CHANPAIR( 9, 10) = 0x665d,
		RF_CHANPAIR(11, 12) = 0x645d,
		RF_CHANPAIR(13, 14) = 0x666d,
	},
	{ /* table 6 */
		RF_CHANPAIR( 1,  2) = 0x6663,
		RF_CHANPAIR( 3,  4) = 0x6063,
		RF_CHANPAIR( 5,  6) = 0x6643,
		RF_CHANPAIR( 7,  8) = 0x6443,
		RF_CHANPAIR( 9, 10) = 0x667d,
		RF_CHANPAIR(11, 12) = 0x647d,
		RF_CHANPAIR(13, 14) = 0x665d,
	},
	{ /* table 7 */
		RF_CHANPAIR( 1,  2) = 0x6653,
		RF_CHANPAIR( 3,  4) = 0x6053,
		RF_CHANPAIR( 5,  6) = 0x6663,
		RF_CHANPAIR( 7,  8) = 0x6463,
		RF_CHANPAIR( 9, 10) = 0x6643,
		RF_CHANPAIR(11, 12) = 0x6443,
		RF_CHANPAIR(13, 14) = 0x667d,
	},
	{ /* table 8 */
		RF_CHANPAIR( 1,  2) = 0x6673,
		RF_CHANPAIR( 3,  4) = 0x6073,
		RF_CHANPAIR( 5,  6) = 0x6653,
		RF_CHANPAIR( 7,  8) = 0x6453,
		RF_CHANPAIR( 9, 10) = 0x6663,
		RF_CHANPAIR(11, 12) = 0x6463,
		RF_CHANPAIR(13, 14) = 0x6643,
	},
	{ /* table 9 */
		RF_CHANPAIR( 1,  2) = 0x664b,
		RF_CHANPAIR( 3,  4) = 0x604b,
		RF_CHANPAIR( 5,  6) = 0x6673,
		RF_CHANPAIR( 7,  8) = 0x6473,
		RF_CHANPAIR( 9, 10) = 0x6653,
		RF_CHANPAIR(11, 12) = 0x6453,
		RF_CHANPAIR(13, 14) = 0x6663,
	},
	{ /* table 10 */
		RF_CHANPAIR( 1,  2) = 0x666b,
		RF_CHANPAIR( 3,  4) = 0x606b,
		RF_CHANPAIR( 5,  6) = 0x664b,
		RF_CHANPAIR( 7,  8) = 0x644b,
		RF_CHANPAIR( 9, 10) = 0x6673,
		RF_CHANPAIR(11, 12) = 0x6473,
		RF_CHANPAIR(13, 14) = 0x6653,
	},
	{ /* table 11 */
		RF_CHANPAIR( 1,  2) = 0x665b,
		RF_CHANPAIR( 3,  4) = 0x605b,
		RF_CHANPAIR( 5,  6) = 0x666b,
		RF_CHANPAIR( 7,  8) = 0x646b,
		RF_CHANPAIR( 9, 10) = 0x664b,
		RF_CHANPAIR(11, 12) = 0x644b,
		RF_CHANPAIR(13, 14) = 0x6673,
	},
};

/* The per-channel synth values for autocal. These get written to register 1. */
static const u16 uw2453_autocal_synth[] = {
	RF_CHANNEL( 1) = 0x6847,
	RF_CHANNEL( 2) = 0x6847,
	RF_CHANNEL( 3) = 0x6867,
	RF_CHANNEL( 4) = 0x6867,
	RF_CHANNEL( 5) = 0x6867,
	RF_CHANNEL( 6) = 0x6867,
	RF_CHANNEL( 7) = 0x6857,
	RF_CHANNEL( 8) = 0x6857,
	RF_CHANNEL( 9) = 0x6857,
	RF_CHANNEL(10) = 0x6857,
	RF_CHANNEL(11) = 0x6877,
	RF_CHANNEL(12) = 0x6877,
	RF_CHANNEL(13) = 0x6877,
	RF_CHANNEL(14) = 0x684f,
};

/* The VCO configuration for autocal (all channels) */
static const u16 UW2453_AUTOCAL_VCO_CFG = 0x6662;

/* TX gain settings. The array index corresponds to the TX power integration
 * values found in the EEPROM. The values get written to register 7. */
static u32 uw2453_txgain[] = {
	[0x00] = 0x0e313,
	[0x01] = 0x0fb13,
	[0x02] = 0x0e093,
	[0x03] = 0x0f893,
	[0x04] = 0x0ea93,
	[0x05] = 0x1f093,
	[0x06] = 0x1f493,
	[0x07] = 0x1f693,
	[0x08] = 0x1f393,
	[0x09] = 0x1f35b,
	[0x0a] = 0x1e6db,
	[0x0b] = 0x1ff3f,
	[0x0c] = 0x1ffff,
	[0x0d] = 0x361d7,
	[0x0e] = 0x37fbf,
	[0x0f] = 0x3ff8b,
	[0x10] = 0x3ff33,
	[0x11] = 0x3fb3f,
	[0x12] = 0x3ffff,
};

/* RF-specific structure */
struct uw2453_priv {
	/* index into synth/VCO config tables where PLL lock was found
	 * -1 means autocal */
	int config;
};

#define UW2453_PRIV(rf) ((struct uw2453_priv *) (rf)->priv)

/* Program registers 1 (synth) and 2 (fractional divide ratio) for the given
 * 1-based channel, choosing the autocal or standard synth table.
 * Returns 0 on success, negative on RF write failure. */
static int uw2453_synth_set_channel(struct zd_chip *chip, int channel,
	bool autocal)
{
	int r;
	int idx = channel - 1;
	u32 val;

	if (autocal)
		val = UW2453_REGWRITE(1, uw2453_autocal_synth[idx]);
	else
		val = UW2453_REGWRITE(1, uw2453_std_synth[idx]);

	r = zd_rfwrite_locked(chip, val, RF_RV_BITS);
	if (r)
		return r;

	return zd_rfwrite_locked(chip,
		UW2453_REGWRITE(2, uw2453_synth_divide[idx]), RF_RV_BITS);
}

/* Write a VCO configuration word to register 3. */
static int uw2453_write_vco_cfg(struct zd_chip *chip, u16 value)
{
	/* vendor driver always sets these upper bits even though the specs say
	 * they are reserved */
	u32 val = 0x40000 | value;
	return zd_rfwrite_locked(chip, UW2453_REGWRITE(3, val), RF_RV_BITS);
}

/* Cycle the RF through IDLE -> CAL_VCO -> RX/TX mode and power down the
 * RSSI circuit (register 0 writes). */
static int uw2453_init_mode(struct zd_chip *chip)
{
	static const u32 rv[] = {
		UW2453_REGWRITE(0, 0x25f98), /* enter IDLE mode */
		UW2453_REGWRITE(0, 0x25f9a), /* enter CAL_VCO mode */
		UW2453_REGWRITE(0, 0x25f94), /* enter RX/TX mode */
		UW2453_REGWRITE(0, 0x27fd4), /* power down RSSI circuit */
	};

	return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
}

/* Look up the EEPROM TX power integration value for the channel and write
 * the corresponding gain word to register 7.  Out-of-range values are
 * logged and silently ignored (returns 0). */
static int uw2453_set_tx_gain_level(struct zd_chip *chip, int channel)
{
	u8 int_value = chip->pwr_int_values[channel - 1];

	if (int_value >= ARRAY_SIZE(uw2453_txgain)) {
		dev_dbg_f(zd_chip_dev(chip), "can't configure TX gain for "
			  "int value %x on channel %d\n", int_value, channel);
		return 0;
	}

	return zd_rfwrite_locked(chip,
		UW2453_REGWRITE(7, uw2453_txgain[int_value]), RF_RV_BITS);
}

/* One-time hardware init: program the CR registers and initial RF register
 * values, then probe the standard VCO config tables on channel 1 until the
 * PLL-lock interrupt fires; fall back to autocal if none locks.  Stores the
 * chosen config index (found + 1, or -1+1 = 0... see comment at the end) in
 * uw2453_priv.config. */
static int uw2453_init_hw(struct zd_rf *rf)
{
	int i, r;
	int found_config = -1;
	u16 intr_status;
	struct zd_chip *chip = zd_rf_to_chip(rf);

	static const struct zd_ioreq16 ioreqs[] = {
		{ ZD_CR10,  0x89 }, { ZD_CR15,  0x20 },
		{ ZD_CR17,  0x28 }, /* 6112 no change */
		{ ZD_CR23,  0x38 }, { ZD_CR24,  0x20 }, { ZD_CR26,  0x93 },
		{ ZD_CR27,  0x15 }, { ZD_CR28,  0x3e }, { ZD_CR29,  0x00 },
		{ ZD_CR33,  0x28 }, { ZD_CR34,  0x30 },
		{ ZD_CR35,  0x43 }, /* 6112 3e->43 */
		{ ZD_CR41,  0x24 }, { ZD_CR44,  0x32 },
		{ ZD_CR46,  0x92 }, /* 6112 96->92 */
		{ ZD_CR47,  0x1e },
		{ ZD_CR48,  0x04 }, /* 5602 Roger */
		{ ZD_CR49,  0xfa }, { ZD_CR79,  0x58 }, { ZD_CR80,  0x30 },
		{ ZD_CR81,  0x30 }, { ZD_CR87,  0x0a }, { ZD_CR89,  0x04 },
		{ ZD_CR91,  0x00 }, { ZD_CR92,  0x0a }, { ZD_CR98,  0x8d },
		{ ZD_CR99,  0x28 }, { ZD_CR100, 0x02 },
		{ ZD_CR101, 0x09 }, /* 6112 13->1f 6220 1f->13 6407 13->9 */
		{ ZD_CR102, 0x27 },
		{ ZD_CR106, 0x1c }, /* 5d07 5112 1f->1c 6220 1c->1f
				     * 6221 1f->1c
				     */
		{ ZD_CR107, 0x1c }, /* 6220 1c->1a 5221 1a->1c */
		{ ZD_CR109, 0x13 },
		{ ZD_CR110, 0x1f }, /* 6112 13->1f 6221 1f->13 6407 13->0x09 */
		{ ZD_CR111, 0x13 }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 },
		{ ZD_CR114, 0x23 }, /* 6221 27->23 */
		{ ZD_CR115, 0x24 }, /* 6112 24->1c 6220 1c->24 */
		{ ZD_CR116, 0x24 }, /* 6220 1c->24 */
		{ ZD_CR117, 0xfa }, /* 6112 fa->f8 6220 f8->f4 6220 f4->fa */
		{ ZD_CR118, 0xf0 }, /* 5d07 6112 f0->f2 6220 f2->f0 */
		{ ZD_CR119, 0x1a }, /* 6112 1a->10 6220 10->14 6220 14->1a */
		{ ZD_CR120, 0x4f },
		{ ZD_CR121, 0x1f }, /* 6220 4f->1f */
		{ ZD_CR122, 0xf0 }, { ZD_CR123, 0x57 }, { ZD_CR125, 0xad },
		{ ZD_CR126, 0x6c }, { ZD_CR127, 0x03 },
		{ ZD_CR128, 0x14 }, /* 6302 12->11 */
		{ ZD_CR129, 0x12 }, /* 6301 10->0f */
		{ ZD_CR130, 0x10 }, { ZD_CR137, 0x50 }, { ZD_CR138, 0xa8 },
		{ ZD_CR144, 0xac }, { ZD_CR146, 0x20 }, { ZD_CR252, 0xff },
		{ ZD_CR253, 0xff },
	};

	static const u32 rv[] = {
		UW2453_REGWRITE(4, 0x2b),    /* configure receiver gain */
		UW2453_REGWRITE(5, 0x19e4f), /* configure transmitter gain */
		UW2453_REGWRITE(6, 0xf81ad), /* enable RX/TX filter tuning */
		UW2453_REGWRITE(7, 0x3fffe), /* disable TX gain in test mode */

		/* enter CAL_FIL mode, TX gain set by registers, RX gain set by pins,
		 * RSSI circuit powered down, reduced RSSI range */
		UW2453_REGWRITE(0, 0x25f9c), /* 5d01 cal_fil */

		/* synthesizer configuration for channel 1 */
		UW2453_REGWRITE(1, 0x47),
		UW2453_REGWRITE(2, 0x999),

		/* disable manual VCO band selection */
		UW2453_REGWRITE(3, 0x7602),

		/* enable manual VCO band selection, configure current level */
		UW2453_REGWRITE(3, 0x46063),
	};

	r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
	if (r)
		return r;

	r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
	if (r)
		return r;

	r = uw2453_init_mode(chip);
	if (r)
		return r;

	/* Try all standard VCO configuration settings on channel 1 */
	/* (the loop stops one short of the table end so that found_config + 1,
	 * used below, is always a valid index) */
	for (i = 0; i < ARRAY_SIZE(uw2453_std_vco_cfg) - 1; i++) {
		/* Configure synthesizer for channel 1 */
		r = uw2453_synth_set_channel(chip, 1, false);
		if (r)
			return r;

		/* Write VCO config */
		r = uw2453_write_vco_cfg(chip, uw2453_std_vco_cfg[i][0]);
		if (r)
			return r;

		/* ack interrupt event */
		r = zd_iowrite16_locked(chip, 0x0f, UW2453_INTR_REG);
		if (r)
			return r;

		/* check interrupt status */
		r = zd_ioread16_locked(chip, &intr_status, UW2453_INTR_REG);
		if (r)
			return r;

		if (!(intr_status & 0xf)) {
			dev_dbg_f(zd_chip_dev(chip),
				"PLL locked on configuration %d\n", i);
			found_config = i;
			break;
		}
	}

	if (found_config == -1) {
		/* autocal */
		dev_dbg_f(zd_chip_dev(chip),
			"PLL did not lock, using autocal\n");

		r = uw2453_synth_set_channel(chip, 1, true);
		if (r)
			return r;

		r = uw2453_write_vco_cfg(chip, UW2453_AUTOCAL_VCO_CFG);
		if (r)
			return r;
	}

	/* To match the vendor driver behaviour, we use the configuration after
	 * the one that produced a lock. */
	UW2453_PRIV(rf)->config = found_config + 1;

	return zd_iowrite16_locked(chip, 0x06, ZD_CR203);
}

/* Tune to a channel: program synth + divide ratio, write the VCO config
 * remembered by init_hw (or the autocal config), re-run the mode sequence
 * and set the TX gain for the channel. */
static int uw2453_set_channel(struct zd_rf *rf, u8 channel)
{
	int r;
	u16 vco_cfg;
	int config = UW2453_PRIV(rf)->config;
	bool autocal = (config == -1);
	struct zd_chip *chip = zd_rf_to_chip(rf);

	static const struct zd_ioreq16 ioreqs[] = {
		{ ZD_CR80,  0x30 }, { ZD_CR81,  0x30 }, { ZD_CR79,  0x58 },
		{ ZD_CR12,  0xf0 }, { ZD_CR77,  0x1b }, { ZD_CR78,  0x58 },
	};

	r = uw2453_synth_set_channel(chip, channel, autocal);
	if (r)
		return r;

	if (autocal)
		vco_cfg = UW2453_AUTOCAL_VCO_CFG;
	else
		vco_cfg = uw2453_std_vco_cfg[config][CHAN_TO_PAIRIDX(channel)];

	r = uw2453_write_vco_cfg(chip, vco_cfg);
	if (r)
		return r;

	r = uw2453_init_mode(chip);
	if (r)
		return r;

	r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
	if (r)
		return r;

	r = uw2453_set_tx_gain_level(chip, channel);
	if (r)
		return r;

	return zd_iowrite16_locked(chip, 0x06, ZD_CR203);
}

/* Enter RX/TX mode and enable the radio.  ZD1211B needs a different
 * ZD_CR251 value, patched in at runtime. */
static int uw2453_switch_radio_on(struct zd_rf *rf)
{
	int r;
	struct zd_chip *chip = zd_rf_to_chip(rf);
	struct zd_ioreq16 ioreqs[] = {
		{ ZD_CR11,  0x00 },
		{ ZD_CR251, 0x3f },
	};

	/* enter RXTX mode */
	r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f94), RF_RV_BITS);
	if (r)
		return r;

	if (zd_chip_is_zd1211b(chip))
		ioreqs[1].value = 0x7f;

	return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}

/* Put the RF into IDLE mode and disable the radio. */
static int uw2453_switch_radio_off(struct zd_rf *rf)
{
	int r;
	struct zd_chip *chip = zd_rf_to_chip(rf);
	static const struct zd_ioreq16 ioreqs[] = {
		{ ZD_CR11,  0x04 },
		{ ZD_CR251, 0x2f },
	};

	/* enter IDLE mode */
	/* FIXME: shouldn't we go to SLEEP? sent email to zydas */
	r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f90), RF_RV_BITS);
	if (r)
		return r;

	return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
}

/* Release the private state allocated in zd_rf_init_uw2453(). */
static void uw2453_clear(struct zd_rf *rf)
{
	kfree(rf->priv);
}

/* Fill in the zd_rf ops for the UW2453 and allocate its private state.
 * Returns 0 on success, -ENOMEM if the allocation fails. */
int zd_rf_init_uw2453(struct zd_rf *rf)
{
	rf->init_hw = uw2453_init_hw;
	rf->set_channel = uw2453_set_channel;
	rf->switch_radio_on = uw2453_switch_radio_on;
	rf->switch_radio_off = uw2453_switch_radio_off;
	rf->patch_6m_band_edge = zd_rf_generic_patch_6m;
	rf->clear = uw2453_clear;
	/* we have our own TX integration code */
	rf->update_channel_int = 0;

	rf->priv = kmalloc(sizeof(struct uw2453_priv), GFP_KERNEL);
	if (rf->priv == NULL)
		return -ENOMEM;

	return 0;
}
gpl-2.0
coldnew/linux
arch/x86/math-emu/poly_2xm1.c
14421
4476
/*---------------------------------------------------------------------------+
 |  poly_2xm1.c                                                              |
 |                                                                           |
 | Function to compute 2^x-1 by a polynomial approximation.                  |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1997                                         |
 |                  W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
 |                  E-mail   billm@suburbia.net                              |
 |                                                                           |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "control_w.h"
#include "poly.h"

#define	HIPOWER	11
/* Polynomial coefficients for 2^x - 1 on the reduced argument range. */
static const unsigned long long lterms[HIPOWER] = {
	0x0000000000000000LL,	/* This term done separately as 12 bytes */
	0xf5fdeffc162c7543LL,
	0x1c6b08d704a0bfa6LL,
	0x0276556df749cc21LL,
	0x002bb0ffcf14f6b8LL,
	0x0002861225ef751cLL,
	0x00001ffcbfcd5422LL,
	0x00000162c005d5f1LL,
	0x0000000da96ccb1bLL,
	0x0000000078d1b897LL,
	0x000000000422b029LL
};

/* Extended-precision leading coefficient: ln(2). */
static const Xsig hiterm = MK_XSIG(0xb17217f7, 0xd1cf79ab, 0xc8a39194);

/* Four slices: 0.0 : 0.25 : 0.50 : 0.75 : 1.0,
   These numbers are 2^(1/4), 2^(1/2), and 2^(3/4)
 */
static const Xsig shiftterm0 = MK_XSIG(0, 0, 0);
static const Xsig shiftterm1 = MK_XSIG(0x9837f051, 0x8db8a96f, 0x46ad2318);
static const Xsig shiftterm2 = MK_XSIG(0xb504f333, 0xf9de6484, 0x597d89b3);
static const Xsig shiftterm3 = MK_XSIG(0xd744fcca, 0xd69d6af4, 0x39a68bb9);

static const Xsig *shiftterm[] = { &shiftterm0, &shiftterm1,
	&shiftterm2, &shiftterm3
};

/*--- poly_2xm1() -----------------------------------------------------------+
 |  Requires st(0) which is TAG_Valid and < 1.                               |
 |                                                                           |
 |  Computes 2^x - 1 for |x| < 1 (emulation of the x87 F2XM1 instruction).   |
 |  The argument is range-reduced into [0, 0.25) using the identity          |
 |  f(x+a) = f(a)*(f(x)+1) - 1 with a in {0.25, 0.5, 0.75} (shiftterm[]),    |
 |  and negative arguments use f(-x) = -f(x)/(1 + f(x)).                     |
 |  Writes the rounded result into st(0) and returns 0; returns 1 only in    |
 |  PARANOID builds when the exponent precondition is violated.             |
 +---------------------------------------------------------------------------*/
int poly_2xm1(u_char sign, FPU_REG *arg, FPU_REG *result)
{
	long int exponent, shift;
	unsigned long long Xll;
	Xsig accumulator, Denom, argSignif;
	u_char tag;

	exponent = exponent16(arg);

#ifdef PARANOID
	if (exponent >= 0) {	/* Don't want a |number| >= 1.0 */
		/* Number negative, too large, or not Valid. */
		EXCEPTION(EX_INTERNAL | 0x127);
		return 1;
	}
#endif /* PARANOID */

	argSignif.lsw = 0;
	XSIG_LL(argSignif) = Xll = significand(arg);

	/* Range reduction: pick the quarter-interval 'shift' and re-scale the
	   significand so the remaining argument lies in [0, 0.25). */
	if (exponent == -1) {
		shift = (argSignif.msw & 0x40000000) ? 3 : 2;
		/* subtract 0.5 or 0.75 */
		exponent -= 2;
		XSIG_LL(argSignif) <<= 2;
		Xll <<= 2;
	} else if (exponent == -2) {
		shift = 1;
		/* subtract 0.25 */
		exponent--;
		XSIG_LL(argSignif) <<= 1;
		Xll <<= 1;
	} else
		shift = 0;

	if (exponent < -2) {
		/* Shift the argument right by the required places. */
		if (FPU_shrx(&Xll, -2 - exponent) >= 0x80000000U)
			Xll++;	/* round up */
	}

	/* Evaluate the polynomial in extended (96-bit) precision. */
	accumulator.lsw = accumulator.midw = accumulator.msw = 0;
	polynomial_Xsig(&accumulator, &Xll, lterms, HIPOWER - 1);
	mul_Xsig_Xsig(&accumulator, &argSignif);
	shr_Xsig(&accumulator, 3);

	mul_Xsig_Xsig(&argSignif, &hiterm);	/* The leading term */
	add_two_Xsig(&accumulator, &argSignif, &exponent);

	if (shift) {
		/* The argument is large, use the identity:
		   f(x+a) = f(a) * (f(x) + 1) - 1;
		 */
		shr_Xsig(&accumulator, -exponent);
		accumulator.msw |= 0x80000000;	/* add 1.0 */
		mul_Xsig_Xsig(&accumulator, shiftterm[shift]);
		accumulator.msw &= 0x3fffffff;	/* subtract 1.0 */
		exponent = 1;
	}

	if (sign != SIGN_POS) {
		/* The argument is negative, use the identity:
		   f(-x) = -f(x) / (1 + f(x))
		 */
		Denom.lsw = accumulator.lsw;
		XSIG_LL(Denom) = XSIG_LL(accumulator);
		if (exponent < 0)
			shr_Xsig(&Denom, -exponent);
		else if (exponent > 0) {
			/* exponent must be 1 here */
			XSIG_LL(Denom) <<= 1;
			if (Denom.lsw & 0x80000000)
				XSIG_LL(Denom) |= 1;
			(Denom.lsw) <<= 1;
		}
		Denom.msw |= 0x80000000;	/* add 1.0 */
		div_Xsig(&accumulator, &Denom, &accumulator);
	}

	/* Convert to 64 bit signed-compatible */
	exponent += round_Xsig(&accumulator);

	/* NOTE(review): the 'result' parameter is deliberately overwritten
	   here — the output always goes to st(0), regardless of what the
	   caller passed.  Looks intentional (matches F2XM1 semantics) but
	   makes the parameter misleading; confirm against callers. */
	result = &st(0);
	significand(result) = XSIG_LL(accumulator);
	setexponent16(result, exponent);

	tag = FPU_round(result, 1, 0, FULL_PRECISION, sign);

	setsign(result, sign);
	FPU_settag0(tag);

	return 0;
}
gpl-2.0
bedwa/P6800-Kernel
kernel/trace/trace_irqsoff.c
86
16461
/* * trace irqs off critical timings * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * From code in the latency_tracer, that is: * * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 William Lee Irwin III */ #include <linux/kallsyms.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> #include <linux/fs.h> #include "trace.h" static struct trace_array *irqsoff_trace __read_mostly; static int tracer_enabled __read_mostly; static DEFINE_PER_CPU(int, tracing_cpu); static DEFINE_SPINLOCK(max_trace_lock); enum { TRACER_IRQS_OFF = (1 << 1), TRACER_PREEMPT_OFF = (1 << 2), }; static int trace_type __read_mostly; static int save_lat_flag; static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph); #ifdef CONFIG_PREEMPT_TRACER static inline int preempt_trace(void) { return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count()); } #else # define preempt_trace() (0) #endif #ifdef CONFIG_IRQSOFF_TRACER static inline int irq_trace(void) { return ((trace_type & TRACER_IRQS_OFF) && irqs_disabled()); } #else # define irq_trace() (0) #endif #define TRACE_DISPLAY_GRAPH 1 static struct tracer_opt trace_opts[] = { #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* display latency trace as call graph */ { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, #endif { } /* Empty entry */ }; static struct tracer_flags tracer_flags = { .val = 0, .opts = trace_opts, }; #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) /* * Sequence count - we record it when starting a measurement and * skip the latency if the sequence has changed - some other section * did a maximum and could disturb our measurement with serial console * printouts, etc. 
Truly coinciding maximum latencies should be rare * and what happens together happens separately as well, so this doesnt * decrease the validity of the maximum found: */ static __cacheline_aligned_in_smp unsigned long max_sequence; #ifdef CONFIG_FUNCTION_TRACER /* * irqsoff uses its own tracer function to keep the overhead down: */ static void irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; long disabled; int cpu; /* * Does not matter if we preempt. We test the flags * afterward, to see if irqs are disabled or not. * If we preempt and get a false positive, the flags * test will fail. */ cpu = raw_smp_processor_id(); if (likely(!per_cpu(tracing_cpu, cpu))) return; local_save_flags(flags); /* slight chance to get a false positive on tracing_cpu */ if (!irqs_disabled_flags(flags)) return; data = tr->data[cpu]; disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) trace_function(tr, ip, parent_ip, flags, preempt_count()); atomic_dec(&data->disabled); } static struct ftrace_ops trace_ops __read_mostly = { .func = irqsoff_tracer_call, }; #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) { int cpu; if (!(bit & TRACE_DISPLAY_GRAPH)) return -EINVAL; if (!(is_graph() ^ set)) return 0; stop_irqsoff_tracer(irqsoff_trace, !set); for_each_possible_cpu(cpu) per_cpu(tracing_cpu, cpu) = 0; tracing_max_latency = 0; tracing_reset_online_cpus(irqsoff_trace); return start_irqsoff_tracer(irqsoff_trace, set); } static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; long disabled; int ret; int cpu; int pc; cpu = raw_smp_processor_id(); if (likely(!per_cpu(tracing_cpu, cpu))) return 0; local_save_flags(flags); /* slight chance to get a false positive on tracing_cpu */ if 
(!irqs_disabled_flags(flags)) return 0; data = tr->data[cpu]; disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) { pc = preempt_count(); ret = __trace_graph_entry(tr, trace, flags, pc); } else ret = 0; atomic_dec(&data->disabled); return ret; } static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; long disabled; int cpu; int pc; cpu = raw_smp_processor_id(); if (likely(!per_cpu(tracing_cpu, cpu))) return; local_save_flags(flags); /* slight chance to get a false positive on tracing_cpu */ if (!irqs_disabled_flags(flags)) return; data = tr->data[cpu]; disabled = atomic_inc_return(&data->disabled); if (likely(disabled == 1)) { pc = preempt_count(); __trace_graph_return(tr, trace, flags, pc); } atomic_dec(&data->disabled); } static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph()) graph_trace_open(iter); } static void irqsoff_trace_close(struct trace_iterator *iter) { if (iter->private) graph_trace_close(iter); } #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \ TRACE_GRAPH_PRINT_PROC) static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) { u32 flags = GRAPH_TRACER_FLAGS; if (trace_flags & TRACE_ITER_LATENCY_FMT) flags |= TRACE_GRAPH_PRINT_DURATION; else flags |= TRACE_GRAPH_PRINT_ABS_TIME; /* * In graph mode call the graph tracer output function, * otherwise go with the TRACE_FN event handler */ if (is_graph()) return print_graph_function_flags(iter, flags); return TRACE_TYPE_UNHANDLED; } static void irqsoff_print_header(struct seq_file *s) { if (is_graph()) { struct trace_iterator *iter = s->private; u32 flags = GRAPH_TRACER_FLAGS; if (trace_flags & TRACE_ITER_LATENCY_FMT) { /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; print_trace_header(s, iter); flags |= TRACE_GRAPH_PRINT_DURATION; } else flags |= TRACE_GRAPH_PRINT_ABS_TIME; print_graph_headers_flags(s, flags); } 
else trace_default_header(s); } static void trace_graph_function(struct trace_array *tr, unsigned long ip, unsigned long flags, int pc) { u64 time = trace_clock_local(); struct ftrace_graph_ent ent = { .func = ip, .depth = 0, }; struct ftrace_graph_ret ret = { .func = ip, .depth = 0, .calltime = time, .rettime = time, }; __trace_graph_entry(tr, &ent, flags, pc); __trace_graph_return(tr, &ret, flags, pc); } static void __trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { if (!is_graph()) trace_function(tr, ip, parent_ip, flags, pc); else { trace_graph_function(tr, parent_ip, flags, pc); trace_graph_function(tr, ip, flags, pc); } } #else #define __trace_function trace_function static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) { return -EINVAL; } static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) { return -1; } static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) { return TRACE_TYPE_UNHANDLED; } static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } static void irqsoff_print_header(struct seq_file *s) { } static void irqsoff_trace_open(struct trace_iterator *iter) { } static void irqsoff_trace_close(struct trace_iterator *iter) { } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ /* * Should this new latency be reported/recorded? 
*/ static int report_latency(cycle_t delta) { if (tracing_thresh) { if (delta < tracing_thresh) return 0; } else { if (delta <= tracing_max_latency) return 0; } return 1; } static void check_critical_timing(struct trace_array *tr, struct trace_array_cpu *data, unsigned long parent_ip, int cpu) { cycle_t T0, T1, delta; unsigned long flags; int pc; T0 = data->preempt_timestamp; T1 = ftrace_now(cpu); delta = T1-T0; local_save_flags(flags); pc = preempt_count(); if (!report_latency(delta)) goto out; spin_lock_irqsave(&max_trace_lock, flags); /* check if we are still the max latency */ if (!report_latency(delta)) goto out_unlock; __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); /* Skip 5 functions to get to the irq/preempt enable function */ __trace_stack(tr, flags, 5, pc); if (data->critical_sequence != max_sequence) goto out_unlock; data->critical_end = parent_ip; if (likely(!is_tracing_stopped())) { tracing_max_latency = delta; update_max_tr_single(tr, current, cpu); } max_sequence++; out_unlock: spin_unlock_irqrestore(&max_trace_lock, flags); out: data->critical_sequence = max_sequence; data->preempt_timestamp = ftrace_now(cpu); __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); } static inline void start_critical_timing(unsigned long ip, unsigned long parent_ip) { int cpu; struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; if (likely(!tracer_enabled)) return; cpu = raw_smp_processor_id(); if (per_cpu(tracing_cpu, cpu)) return; data = tr->data[cpu]; if (unlikely(!data) || atomic_read(&data->disabled)) return; atomic_inc(&data->disabled); data->critical_sequence = max_sequence; data->preempt_timestamp = ftrace_now(cpu); data->critical_start = parent_ip ? 
: ip; local_save_flags(flags); __trace_function(tr, ip, parent_ip, flags, preempt_count()); per_cpu(tracing_cpu, cpu) = 1; atomic_dec(&data->disabled); } static inline void stop_critical_timing(unsigned long ip, unsigned long parent_ip) { int cpu; struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; unsigned long flags; cpu = raw_smp_processor_id(); /* Always clear the tracing cpu on stopping the trace */ if (unlikely(per_cpu(tracing_cpu, cpu))) per_cpu(tracing_cpu, cpu) = 0; else return; if (!tracer_enabled) return; data = tr->data[cpu]; if (unlikely(!data) || !data->critical_start || atomic_read(&data->disabled)) return; atomic_inc(&data->disabled); local_save_flags(flags); __trace_function(tr, ip, parent_ip, flags, preempt_count()); check_critical_timing(tr, data, parent_ip ? : ip, cpu); data->critical_start = 0; atomic_dec(&data->disabled); } /* start and stop critical timings used to for stoppage (in idle) */ void start_critical_timings(void) { if (preempt_trace() || irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL_GPL(start_critical_timings); void stop_critical_timings(void) { if (preempt_trace() || irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL_GPL(stop_critical_timings); #ifdef CONFIG_IRQSOFF_TRACER #ifdef CONFIG_PROVE_LOCKING void time_hardirqs_on(unsigned long a0, unsigned long a1) { if (!preempt_trace() && irq_trace()) stop_critical_timing(a0, a1); } void time_hardirqs_off(unsigned long a0, unsigned long a1) { if (!preempt_trace() && irq_trace()) start_critical_timing(a0, a1); } #else /* !CONFIG_PROVE_LOCKING */ /* * Stubs: */ void early_boot_irqs_off(void) { } void early_boot_irqs_on(void) { } void trace_softirqs_on(unsigned long ip) { } void trace_softirqs_off(unsigned long ip) { } inline void print_irqtrace_events(struct task_struct *curr) { } /* * We are only interested in hardirq on/off events: */ void trace_hardirqs_on(void) { if (!preempt_trace() && irq_trace()) 
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL(trace_hardirqs_on); void trace_hardirqs_off(void) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } EXPORT_SYMBOL(trace_hardirqs_off); void trace_hardirqs_on_caller(unsigned long caller_addr) { if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, caller_addr); } EXPORT_SYMBOL(trace_hardirqs_on_caller); void trace_hardirqs_off_caller(unsigned long caller_addr) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, caller_addr); } EXPORT_SYMBOL(trace_hardirqs_off_caller); #endif /* CONFIG_PROVE_LOCKING */ #endif /* CONFIG_IRQSOFF_TRACER */ #ifdef CONFIG_PREEMPT_TRACER void trace_preempt_on(unsigned long a0, unsigned long a1) { if (preempt_trace()) stop_critical_timing(a0, a1); } void trace_preempt_off(unsigned long a0, unsigned long a1) { if (preempt_trace()) start_critical_timing(a0, a1); } #endif /* CONFIG_PREEMPT_TRACER */ static int start_irqsoff_tracer(struct trace_array *tr, int graph) { int ret = 0; if (!graph) ret = register_ftrace_function(&trace_ops); else ret = register_ftrace_graph(&irqsoff_graph_return, &irqsoff_graph_entry); if (!ret && tracing_is_enabled()) tracer_enabled = 1; else tracer_enabled = 0; return ret; } static void stop_irqsoff_tracer(struct trace_array *tr, int graph) { tracer_enabled = 0; if (!graph) unregister_ftrace_function(&trace_ops); else unregister_ftrace_graph(); } static void __irqsoff_tracer_init(struct trace_array *tr) { save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; trace_flags |= TRACE_ITER_LATENCY_FMT; tracing_max_latency = 0; irqsoff_trace = tr; /* make sure that the tracer is visible */ smp_wmb(); tracing_reset_online_cpus(tr); if (start_irqsoff_tracer(tr, is_graph())) printk(KERN_ERR "failed to start irqsoff tracer\n"); } static void irqsoff_tracer_reset(struct trace_array *tr) { stop_irqsoff_tracer(tr, is_graph()); if (!save_lat_flag) trace_flags &= 
~TRACE_ITER_LATENCY_FMT; } static void irqsoff_tracer_start(struct trace_array *tr) { tracer_enabled = 1; } static void irqsoff_tracer_stop(struct trace_array *tr) { tracer_enabled = 0; } #ifdef CONFIG_IRQSOFF_TRACER static int irqsoff_tracer_init(struct trace_array *tr) { trace_type = TRACER_IRQS_OFF; __irqsoff_tracer_init(tr); return 0; } static struct tracer irqsoff_tracer __read_mostly = { .name = "irqsoff", .init = irqsoff_tracer_init, .reset = irqsoff_tracer_reset, .start = irqsoff_tracer_start, .stop = irqsoff_tracer_stop, .print_max = 1, .print_header = irqsoff_print_header, .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_irqsoff, #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, .use_max_tr = 1, }; # define register_irqsoff(trace) register_tracer(&trace) #else # define register_irqsoff(trace) do { } while (0) #endif #ifdef CONFIG_PREEMPT_TRACER static int preemptoff_tracer_init(struct trace_array *tr) { trace_type = TRACER_PREEMPT_OFF; __irqsoff_tracer_init(tr); return 0; } static struct tracer preemptoff_tracer __read_mostly = { .name = "preemptoff", .init = preemptoff_tracer_init, .reset = irqsoff_tracer_reset, .start = irqsoff_tracer_start, .stop = irqsoff_tracer_stop, .print_max = 1, .print_header = irqsoff_print_header, .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptoff, #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, .use_max_tr = 1, }; # define register_preemptoff(trace) register_tracer(&trace) #else # define register_preemptoff(trace) do { } while (0) #endif #if defined(CONFIG_IRQSOFF_TRACER) && \ defined(CONFIG_PREEMPT_TRACER) static int preemptirqsoff_tracer_init(struct trace_array *tr) { trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; __irqsoff_tracer_init(tr); return 0; } static struct tracer 
preemptirqsoff_tracer __read_mostly = { .name = "preemptirqsoff", .init = preemptirqsoff_tracer_init, .reset = irqsoff_tracer_reset, .start = irqsoff_tracer_start, .stop = irqsoff_tracer_stop, .print_max = 1, .print_header = irqsoff_print_header, .print_line = irqsoff_print_line, .flags = &tracer_flags, .set_flag = irqsoff_set_flag, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_preemptirqsoff, #endif .open = irqsoff_trace_open, .close = irqsoff_trace_close, .use_max_tr = 1, }; # define register_preemptirqsoff(trace) register_tracer(&trace) #else # define register_preemptirqsoff(trace) do { } while (0) #endif __init static int init_irqsoff_tracer(void) { register_irqsoff(irqsoff_tracer); register_preemptoff(preemptoff_tracer); register_preemptirqsoff(preemptirqsoff_tracer); return 0; } device_initcall(init_irqsoff_tracer);
gpl-2.0
fweisbec/tracing
drivers/staging/iio/adc/ad7606_core.c
86
14469
/* * AD7606 SPI ADC driver * * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/regulator/consumer.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/module.h> #include "../iio.h" #include "../sysfs.h" #include "../buffer.h" #include "ad7606.h" int ad7606_reset(struct ad7606_state *st) { if (gpio_is_valid(st->pdata->gpio_reset)) { gpio_set_value(st->pdata->gpio_reset, 1); ndelay(100); /* t_reset >= 100ns */ gpio_set_value(st->pdata->gpio_reset, 0); return 0; } return -ENODEV; } static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned ch) { struct ad7606_state *st = iio_priv(indio_dev); int ret; st->done = false; gpio_set_value(st->pdata->gpio_convst, 1); ret = wait_event_interruptible(st->wq_data_avail, st->done); if (ret) goto error_ret; if (gpio_is_valid(st->pdata->gpio_frstdata)) { ret = st->bops->read_block(st->dev, 1, st->data); if (ret) goto error_ret; if (!gpio_get_value(st->pdata->gpio_frstdata)) { /* This should never happen */ ad7606_reset(st); ret = -EIO; goto error_ret; } ret = st->bops->read_block(st->dev, st->chip_info->num_channels - 1, &st->data[1]); if (ret) goto error_ret; } else { ret = st->bops->read_block(st->dev, st->chip_info->num_channels, st->data); if (ret) goto error_ret; } ret = st->data[ch]; error_ret: gpio_set_value(st->pdata->gpio_convst, 0); return ret; } static int ad7606_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long m) { int ret; struct ad7606_state *st = iio_priv(indio_dev); unsigned int scale_uv; switch (m) { case 0: mutex_lock(&indio_dev->mlock); if (iio_buffer_enabled(indio_dev)) ret = -EBUSY; else ret = ad7606_scan_direct(indio_dev, chan->address); mutex_unlock(&indio_dev->mlock); if (ret < 0) return ret; *val = (short) ret; return 
IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: scale_uv = (st->range * 1000 * 2) >> st->chip_info->channels[0].scan_type.realbits; *val = scale_uv / 1000; *val2 = (scale_uv % 1000) * 1000; return IIO_VAL_INT_PLUS_MICRO; } return -EINVAL; } static ssize_t ad7606_show_range(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); return sprintf(buf, "%u\n", st->range); } static ssize_t ad7606_store_range(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); unsigned long lval; if (strict_strtoul(buf, 10, &lval)) return -EINVAL; if (!(lval == 5000 || lval == 10000)) { dev_err(dev, "range is not supported\n"); return -EINVAL; } mutex_lock(&indio_dev->mlock); gpio_set_value(st->pdata->gpio_range, lval == 10000); st->range = lval; mutex_unlock(&indio_dev->mlock); return count; } static IIO_DEVICE_ATTR(in_voltage_range, S_IRUGO | S_IWUSR, \ ad7606_show_range, ad7606_store_range, 0); static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000"); static ssize_t ad7606_show_oversampling_ratio(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); return sprintf(buf, "%u\n", st->oversampling); } static int ad7606_oversampling_get_index(unsigned val) { unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64}; int i; for (i = 0; i < ARRAY_SIZE(supported); i++) if (val == supported[i]) return i; return -EINVAL; } static ssize_t ad7606_store_oversampling_ratio(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); unsigned long lval; int ret; if (strict_strtoul(buf, 10, &lval)) return -EINVAL; ret = ad7606_oversampling_get_index(lval); 
if (ret < 0) { dev_err(dev, "oversampling %lu is not supported\n", lval); return ret; } mutex_lock(&indio_dev->mlock); gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1); gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1); gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1); st->oversampling = lval; mutex_unlock(&indio_dev->mlock); return count; } static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR, ad7606_show_oversampling_ratio, ad7606_store_oversampling_ratio, 0); static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64"); static struct attribute *ad7606_attributes[] = { &iio_dev_attr_in_voltage_range.dev_attr.attr, &iio_const_attr_in_voltage_range_available.dev_attr.attr, &iio_dev_attr_oversampling_ratio.dev_attr.attr, &iio_const_attr_oversampling_ratio_available.dev_attr.attr, NULL, }; static umode_t ad7606_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ad7606_state *st = iio_priv(indio_dev); umode_t mode = attr->mode; if (!(gpio_is_valid(st->pdata->gpio_os0) && gpio_is_valid(st->pdata->gpio_os1) && gpio_is_valid(st->pdata->gpio_os2)) && (attr == &iio_dev_attr_oversampling_ratio.dev_attr.attr || attr == &iio_const_attr_oversampling_ratio_available.dev_attr.attr)) mode = 0; else if (!gpio_is_valid(st->pdata->gpio_range) && (attr == &iio_dev_attr_in_voltage_range.dev_attr.attr || attr == &iio_const_attr_in_voltage_range_available.dev_attr.attr)) mode = 0; return mode; } static const struct attribute_group ad7606_attribute_group = { .attrs = ad7606_attributes, .is_visible = ad7606_attr_is_visible, }; #define AD7606_CHANNEL(num) \ { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = num, \ .address = num, \ .scan_index = num, \ .scan_type = IIO_ST('s', 16, 16, 0), \ } static struct iio_chan_spec ad7606_8_channels[] = { AD7606_CHANNEL(0), AD7606_CHANNEL(1), AD7606_CHANNEL(2), AD7606_CHANNEL(3), 
AD7606_CHANNEL(4), AD7606_CHANNEL(5), AD7606_CHANNEL(6), AD7606_CHANNEL(7), IIO_CHAN_SOFT_TIMESTAMP(8), }; static struct iio_chan_spec ad7606_6_channels[] = { AD7606_CHANNEL(0), AD7606_CHANNEL(1), AD7606_CHANNEL(2), AD7606_CHANNEL(3), AD7606_CHANNEL(4), AD7606_CHANNEL(5), IIO_CHAN_SOFT_TIMESTAMP(6), }; static struct iio_chan_spec ad7606_4_channels[] = { AD7606_CHANNEL(0), AD7606_CHANNEL(1), AD7606_CHANNEL(2), AD7606_CHANNEL(3), IIO_CHAN_SOFT_TIMESTAMP(4), }; static const struct ad7606_chip_info ad7606_chip_info_tbl[] = { /* * More devices added in future */ [ID_AD7606_8] = { .name = "ad7606", .int_vref_mv = 2500, .channels = ad7606_8_channels, .num_channels = 8, }, [ID_AD7606_6] = { .name = "ad7606-6", .int_vref_mv = 2500, .channels = ad7606_6_channels, .num_channels = 6, }, [ID_AD7606_4] = { .name = "ad7606-4", .int_vref_mv = 2500, .channels = ad7606_4_channels, .num_channels = 4, }, }; static int ad7606_request_gpios(struct ad7606_state *st) { struct gpio gpio_array[3] = { [0] = { .gpio = st->pdata->gpio_os0, .flags = GPIOF_DIR_OUT | ((st->oversampling & 1) ? GPIOF_INIT_HIGH : GPIOF_INIT_LOW), .label = "AD7606_OS0", }, [1] = { .gpio = st->pdata->gpio_os1, .flags = GPIOF_DIR_OUT | ((st->oversampling & 2) ? GPIOF_INIT_HIGH : GPIOF_INIT_LOW), .label = "AD7606_OS1", }, [2] = { .gpio = st->pdata->gpio_os2, .flags = GPIOF_DIR_OUT | ((st->oversampling & 4) ? 
GPIOF_INIT_HIGH : GPIOF_INIT_LOW), .label = "AD7606_OS2", }, }; int ret; if (gpio_is_valid(st->pdata->gpio_convst)) { ret = gpio_request_one(st->pdata->gpio_convst, GPIOF_OUT_INIT_LOW, "AD7606_CONVST"); if (ret) { dev_err(st->dev, "failed to request GPIO CONVST\n"); goto error_ret; } } else { ret = -EIO; goto error_ret; } if (gpio_is_valid(st->pdata->gpio_os0) && gpio_is_valid(st->pdata->gpio_os1) && gpio_is_valid(st->pdata->gpio_os2)) { ret = gpio_request_array(gpio_array, ARRAY_SIZE(gpio_array)); if (ret < 0) goto error_free_convst; } if (gpio_is_valid(st->pdata->gpio_reset)) { ret = gpio_request_one(st->pdata->gpio_reset, GPIOF_OUT_INIT_LOW, "AD7606_RESET"); if (ret < 0) goto error_free_os; } if (gpio_is_valid(st->pdata->gpio_range)) { ret = gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT | ((st->range == 10000) ? GPIOF_INIT_HIGH : GPIOF_INIT_LOW), "AD7606_RANGE"); if (ret < 0) goto error_free_reset; } if (gpio_is_valid(st->pdata->gpio_stby)) { ret = gpio_request_one(st->pdata->gpio_stby, GPIOF_OUT_INIT_HIGH, "AD7606_STBY"); if (ret < 0) goto error_free_range; } if (gpio_is_valid(st->pdata->gpio_frstdata)) { ret = gpio_request_one(st->pdata->gpio_frstdata, GPIOF_IN, "AD7606_FRSTDATA"); if (ret < 0) goto error_free_stby; } return 0; error_free_stby: if (gpio_is_valid(st->pdata->gpio_stby)) gpio_free(st->pdata->gpio_stby); error_free_range: if (gpio_is_valid(st->pdata->gpio_range)) gpio_free(st->pdata->gpio_range); error_free_reset: if (gpio_is_valid(st->pdata->gpio_reset)) gpio_free(st->pdata->gpio_reset); error_free_os: if (gpio_is_valid(st->pdata->gpio_os0) && gpio_is_valid(st->pdata->gpio_os1) && gpio_is_valid(st->pdata->gpio_os2)) gpio_free_array(gpio_array, ARRAY_SIZE(gpio_array)); error_free_convst: gpio_free(st->pdata->gpio_convst); error_ret: return ret; } static void ad7606_free_gpios(struct ad7606_state *st) { if (gpio_is_valid(st->pdata->gpio_frstdata)) gpio_free(st->pdata->gpio_frstdata); if (gpio_is_valid(st->pdata->gpio_stby)) 
gpio_free(st->pdata->gpio_stby); if (gpio_is_valid(st->pdata->gpio_range)) gpio_free(st->pdata->gpio_range); if (gpio_is_valid(st->pdata->gpio_reset)) gpio_free(st->pdata->gpio_reset); if (gpio_is_valid(st->pdata->gpio_os0) && gpio_is_valid(st->pdata->gpio_os1) && gpio_is_valid(st->pdata->gpio_os2)) { gpio_free(st->pdata->gpio_os2); gpio_free(st->pdata->gpio_os1); gpio_free(st->pdata->gpio_os0); } gpio_free(st->pdata->gpio_convst); } /** * Interrupt handler */ static irqreturn_t ad7606_interrupt(int irq, void *dev_id) { struct iio_dev *indio_dev = dev_id; struct ad7606_state *st = iio_priv(indio_dev); if (iio_buffer_enabled(indio_dev)) { if (!work_pending(&st->poll_work)) schedule_work(&st->poll_work); } else { st->done = true; wake_up_interruptible(&st->wq_data_avail); } return IRQ_HANDLED; }; static const struct iio_info ad7606_info = { .driver_module = THIS_MODULE, .read_raw = &ad7606_read_raw, .attrs = &ad7606_attribute_group, }; struct iio_dev *ad7606_probe(struct device *dev, int irq, void __iomem *base_address, unsigned id, const struct ad7606_bus_ops *bops) { struct ad7606_platform_data *pdata = dev->platform_data; struct ad7606_state *st; int ret; struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } st = iio_priv(indio_dev); st->dev = dev; st->bops = bops; st->base_address = base_address; st->range = pdata->default_range == 10000 ? 
10000 : 5000; ret = ad7606_oversampling_get_index(pdata->default_os); if (ret < 0) { dev_warn(dev, "oversampling %d is not supported\n", pdata->default_os); st->oversampling = 0; } else { st->oversampling = pdata->default_os; } st->reg = regulator_get(dev, "vcc"); if (!IS_ERR(st->reg)) { ret = regulator_enable(st->reg); if (ret) goto error_put_reg; } st->pdata = pdata; st->chip_info = &ad7606_chip_info_tbl[id]; indio_dev->dev.parent = dev; indio_dev->info = &ad7606_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->name = st->chip_info->name; indio_dev->channels = st->chip_info->channels; indio_dev->num_channels = st->chip_info->num_channels; init_waitqueue_head(&st->wq_data_avail); ret = ad7606_request_gpios(st); if (ret) goto error_disable_reg; ret = ad7606_reset(st); if (ret) dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n"); ret = request_irq(irq, ad7606_interrupt, IRQF_TRIGGER_FALLING, st->chip_info->name, indio_dev); if (ret) goto error_free_gpios; ret = ad7606_register_ring_funcs_and_init(indio_dev); if (ret) goto error_free_irq; ret = iio_buffer_register(indio_dev, indio_dev->channels, indio_dev->num_channels); if (ret) goto error_cleanup_ring; ret = iio_device_register(indio_dev); if (ret) goto error_unregister_ring; return indio_dev; error_unregister_ring: iio_buffer_unregister(indio_dev); error_cleanup_ring: ad7606_ring_cleanup(indio_dev); error_free_irq: free_irq(irq, indio_dev); error_free_gpios: ad7606_free_gpios(st); error_disable_reg: if (!IS_ERR(st->reg)) regulator_disable(st->reg); error_put_reg: if (!IS_ERR(st->reg)) regulator_put(st->reg); iio_free_device(indio_dev); error_ret: return ERR_PTR(ret); } int ad7606_remove(struct iio_dev *indio_dev, int irq) { struct ad7606_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); iio_buffer_unregister(indio_dev); ad7606_ring_cleanup(indio_dev); free_irq(irq, indio_dev); if (!IS_ERR(st->reg)) { regulator_disable(st->reg); regulator_put(st->reg); } ad7606_free_gpios(st); 
iio_free_device(indio_dev); return 0; } void ad7606_suspend(struct iio_dev *indio_dev) { struct ad7606_state *st = iio_priv(indio_dev); if (gpio_is_valid(st->pdata->gpio_stby)) { if (gpio_is_valid(st->pdata->gpio_range)) gpio_set_value(st->pdata->gpio_range, 1); gpio_set_value(st->pdata->gpio_stby, 0); } } void ad7606_resume(struct iio_dev *indio_dev) { struct ad7606_state *st = iio_priv(indio_dev); if (gpio_is_valid(st->pdata->gpio_stby)) { if (gpio_is_valid(st->pdata->gpio_range)) gpio_set_value(st->pdata->gpio_range, st->range == 10000); gpio_set_value(st->pdata->gpio_stby, 1); ad7606_reset(st); } } MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Analog Devices AD7606 ADC"); MODULE_LICENSE("GPL v2");
gpl-2.0
EPDCenterSpain/bq-DC-v2
arch/arm/mach-rk30/cpuidle.c
86
2072
/* * Copyright (C) 2012 ROCKCHIP, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) "cpuidle: %s: " fmt, __func__ #include <linux/pm.h> #include <linux/cpuidle.h> #include <linux/suspend.h> #include <linux/err.h> #include <asm/hardware/gic.h> #include <asm/io.h> static bool rk30_gic_interrupt_pending(void) { return (readl_relaxed(RK30_GICC_BASE + GIC_CPU_HIGHPRI) != 0x3FF); } static void rk30_wfi_until_interrupt(void) { retry: cpu_do_idle(); if (!rk30_gic_interrupt_pending()) goto retry; } static int rk30_idle(struct cpuidle_device *dev, struct cpuidle_state *state) { ktime_t preidle, postidle; local_fiq_disable(); preidle = ktime_get(); rk30_wfi_until_interrupt(); postidle = ktime_get(); local_fiq_enable(); local_irq_enable(); return ktime_to_us(ktime_sub(postidle, preidle)); } static DEFINE_PER_CPU(struct cpuidle_device, rk30_cpuidle_device); static __initdata struct cpuidle_state rk30_cpuidle_states[] = { { .name = "C1", .desc = "idle", .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 0, .target_residency = 0, .enter = rk30_idle, }, }; static struct cpuidle_driver rk30_cpuidle_driver = { .name = "rk30_cpuidle", .owner = THIS_MODULE, }; static int __init rk30_cpuidle_init(void) { struct cpuidle_device *dev; unsigned int cpu; int ret; ret = cpuidle_register_driver(&rk30_cpuidle_driver); if (ret) { pr_err("failed to register cpuidle driver: %d\n", ret); return ret; } for_each_possible_cpu(cpu) { dev = &per_cpu(rk30_cpuidle_device, cpu); dev->cpu = cpu; dev->state_count = ARRAY_SIZE(rk30_cpuidle_states); memcpy(dev->states, rk30_cpuidle_states, sizeof(rk30_cpuidle_states)); dev->safe_state = &dev->states[0]; ret = cpuidle_register_device(dev); if (ret) { pr_err("failed to register cpuidle device for cpu %u: %d\n", cpu, ret); return ret; } } return 0; } late_initcall(rk30_cpuidle_init);
gpl-2.0
DeltaResero/GC-Wii-Linux-Kernel-3.12.y
sound/soc/samsung/dma.c
86
11347
/*
 * dma.c -- ALSA Soc Audio Layer
 *
 * (c) 2006 Wolfson Microelectronics PLC.
 * Graeme Gregory graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
 *
 * Copyright 2004-2005 Simtec Electronics
 *	http://armlinux.simtec.co.uk/
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <sound/soc.h>
#include <sound/pcm_params.h>

#include <asm/dma.h>
#include <mach/hardware.h>
#include <mach/dma.h>

#include "dma.h"

#define ST_RUNNING	(1<<0)
#define ST_OPENED	(1<<1)

static const struct snd_pcm_hardware dma_hardware = {
	.info			= SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_BLOCK_TRANSFER |
				  SNDRV_PCM_INFO_MMAP |
				  SNDRV_PCM_INFO_MMAP_VALID,
	.formats		= SNDRV_PCM_FMTBIT_S16_LE |
				  SNDRV_PCM_FMTBIT_U16_LE |
				  SNDRV_PCM_FMTBIT_U8 |
				  SNDRV_PCM_FMTBIT_S8,
	.channels_min		= 2,
	.channels_max		= 2,
	.buffer_bytes_max	= 128*1024,
	.period_bytes_min	= PAGE_SIZE,
	.period_bytes_max	= PAGE_SIZE*2,
	.periods_min		= 2,
	.periods_max		= 128,
	.fifo_size		= 32,
};

/*
 * Per-substream state, hung off substream->runtime->private_data.
 *
 * @lock:       protects dma_loaded and the enqueue path against the DMA
 *              completion callback (audio_buffdone).
 * @state:      ST_RUNNING / ST_OPENED flags.
 * @dma_loaded: number of periods currently queued on the channel.
 * @dma_period: period size in bytes.
 * @dma_start:  bus address of the start of the buffer.
 * @dma_pos:    bus address of the next period to queue / current position.
 * @dma_end:    bus address one past the end of the buffer.
 * @params:     DMA channel parameters handed over by the cpu DAI; non-NULL
 *              only while a channel is successfully requested.
 */
struct runtime_data {
	spinlock_t lock;
	int state;
	unsigned int dma_loaded;
	unsigned int dma_period;
	dma_addr_t dma_start;
	dma_addr_t dma_pos;
	dma_addr_t dma_end;
	struct s3c_dma_params *params;
};

static void audio_buffdone(void *data);

/* dma_enqueue
 *
 * Place DMA buffers onto the queue for the dma system to handle.
 *
 * With a cyclic-capable DMA engine the whole buffer is submitted once as
 * a single cyclic transfer; otherwise individual periods are queued until
 * the channel holds 'limit' outstanding periods.  Callers must hold
 * prtd->lock (or be running before the stream can race, e.g. prepare).
 */
static void dma_enqueue(struct snd_pcm_substream *substream)
{
	struct runtime_data *prtd = substream->runtime->private_data;
	dma_addr_t pos = prtd->dma_pos;
	unsigned int limit;
	struct samsung_dma_prep dma_info;

	pr_debug("Entered %s\n", __func__);

	/* number of whole periods in the buffer */
	limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period;

	pr_debug("%s: loaded %d, limit %d\n",
				__func__, prtd->dma_loaded, limit);

	dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE);
	dma_info.direction =
		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
		? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
	dma_info.fp = audio_buffdone;
	dma_info.fp_param = substream;
	dma_info.period = prtd->dma_period;
	dma_info.len = prtd->dma_period*limit;

	if (dma_info.cap == DMA_CYCLIC) {
		/* one cyclic transfer covers the whole buffer */
		dma_info.buf = pos;
		prtd->params->ops->prepare(prtd->params->ch, &dma_info);
		prtd->dma_loaded += limit;
		return;
	}

	while (prtd->dma_loaded < limit) {
		pr_debug("dma_loaded: %d\n", prtd->dma_loaded);

		/* clamp the final period so it does not run past the buffer */
		if ((pos + dma_info.period) > prtd->dma_end) {
			dma_info.period = prtd->dma_end - pos;
			pr_debug("%s: corrected dma len %ld\n",
					__func__, dma_info.period);
		}

		dma_info.buf = pos;
		prtd->params->ops->prepare(prtd->params->ch, &dma_info);

		prtd->dma_loaded++;
		pos += prtd->dma_period;
		if (pos >= prtd->dma_end)
			pos = prtd->dma_start;
	}

	prtd->dma_pos = pos;
}

/*
 * DMA completion callback, called once per finished period.
 *
 * Advances the software position, notifies the PCM core, and - for
 * non-cyclic engines - refills the channel queue under prtd->lock.
 *
 * Fix vs. original: substream was dereferenced (substream->runtime)
 * *before* the NULL check, making the check dead code; guard first.
 */
static void audio_buffdone(void *data)
{
	struct snd_pcm_substream *substream = data;
	struct runtime_data *prtd;

	pr_debug("Entered %s\n", __func__);

	if (!substream)
		return;

	prtd = substream->runtime->private_data;

	if (prtd->state & ST_RUNNING) {
		prtd->dma_pos += prtd->dma_period;
		if (prtd->dma_pos >= prtd->dma_end)
			prtd->dma_pos = prtd->dma_start;

		snd_pcm_period_elapsed(substream);

		spin_lock(&prtd->lock);
		if (!samsung_dma_has_circular()) {
			prtd->dma_loaded--;
			dma_enqueue(substream);
		}
		spin_unlock(&prtd->lock);
	}
}

/*
 * hw_params callback: request and configure the DMA channel on first use
 * and record the buffer geometry for the enqueue/pointer paths.
 *
 * Fix vs. original: if the channel request fails, prtd->params is reset
 * to NULL so a later retry re-initializes the channel and dma_hw_free()
 * does not flush/release a NULL channel handle.
 */
static int dma_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	unsigned long totbytes = params_buffer_bytes(params);
	struct s3c_dma_params *dma =
		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
	struct samsung_dma_req req;
	struct samsung_dma_config config;

	pr_debug("Entered %s\n", __func__);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!dma)
		return 0;

	/* this may get called several times by oss emulation
	 * with different params -HW */
	if (prtd->params == NULL) {
		/* prepare DMA */
		prtd->params = dma;

		pr_debug("params %p, client %p, channel %d\n", prtd->params,
			prtd->params->client, prtd->params->channel);

		prtd->params->ops = samsung_dma_get_ops();

		req.cap = (samsung_dma_has_circular() ?
			DMA_CYCLIC : DMA_SLAVE);
		req.client = prtd->params->client;
		config.direction =
			(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
			? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
		config.width = prtd->params->dma_size;
		config.fifo = prtd->params->dma_addr;
		prtd->params->ch = prtd->params->ops->request(
				prtd->params->channel, &req, rtd->cpu_dai->dev,
				prtd->params->ch_name);
		if (!prtd->params->ch) {
			pr_err("Failed to allocate DMA channel\n");
			/* leave params unset so the next call retries and
			 * hw_free does not touch an invalid channel */
			prtd->params = NULL;
			return -ENXIO;
		}
		prtd->params->ops->config(prtd->params->ch, &config);
	}

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	runtime->dma_bytes = totbytes;

	spin_lock_irq(&prtd->lock);
	prtd->dma_loaded = 0;
	prtd->dma_period = params_period_bytes(params);
	prtd->dma_start = runtime->dma_addr;
	prtd->dma_pos = prtd->dma_start;
	prtd->dma_end = prtd->dma_start + totbytes;
	spin_unlock_irq(&prtd->lock);

	return 0;
}

/*
 * hw_free callback: drop the runtime buffer and, if a channel was
 * acquired, flush and release it.
 */
static int dma_hw_free(struct snd_pcm_substream *substream)
{
	struct runtime_data *prtd = substream->runtime->private_data;

	pr_debug("Entered %s\n", __func__);

	snd_pcm_set_runtime_buffer(substream, NULL);

	if (prtd->params) {
		prtd->params->ops->flush(prtd->params->ch);
		prtd->params->ops->release(prtd->params->ch,
					prtd->params->client);
		prtd->params = NULL;
	}

	return 0;
}

/*
 * prepare callback: flush any stale transfers and pre-queue the buffer
 * so the channel is primed before trigger(START).
 */
static int dma_prepare(struct snd_pcm_substream *substream)
{
	struct runtime_data *prtd = substream->runtime->private_data;
	int ret = 0;

	pr_debug("Entered %s\n", __func__);

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!prtd->params)
		return 0;

	/* flush the DMA channel */
	prtd->params->ops->flush(prtd->params->ch);

	prtd->dma_loaded = 0;
	prtd->dma_pos = prtd->dma_start;

	/* enqueue dma buffers */
	dma_enqueue(substream);

	return ret;
}

/*
 * trigger callback: start/stop the DMA channel and track the running
 * state under prtd->lock so audio_buffdone sees a consistent view.
 */
static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct runtime_data *prtd = substream->runtime->private_data;
	int ret = 0;

	pr_debug("Entered %s\n", __func__);

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		prtd->state |= ST_RUNNING;
		prtd->params->ops->trigger(prtd->params->ch);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
		prtd->state &= ~ST_RUNNING;
		prtd->params->ops->stop(prtd->params->ch);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}

/*
 * pointer callback: report the current position as a frame offset into
 * the buffer.
 */
static snd_pcm_uframes_t
dma_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct runtime_data *prtd = runtime->private_data;
	unsigned long res;

	pr_debug("Entered %s\n", __func__);

	res = prtd->dma_pos - prtd->dma_start;

	pr_debug("Pointer offset: %lu\n", res);

	/* we seem to be getting the odd error from the pcm library due
	 * to out-of-bounds pointers. this is maybe due to the dma engine
	 * not having loaded the new values for the channel before being
	 * called... (todo - fix )
	 */
	if (res == snd_pcm_lib_buffer_bytes(substream))
		res = 0;

	return bytes_to_frames(substream->runtime, res);
}

/*
 * open callback: apply hardware constraints and allocate the
 * per-substream runtime_data.
 */
static int dma_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct runtime_data *prtd;

	pr_debug("Entered %s\n", __func__);

	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
	snd_soc_set_runtime_hwparams(substream, &dma_hardware);

	prtd = kzalloc(sizeof(struct runtime_data), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	spin_lock_init(&prtd->lock);

	runtime->private_data = prtd;
	return 0;
}

/*
 * close callback: free the per-substream state.  kfree(NULL) is a no-op,
 * so a missing prtd is only worth a debug message.
 */
static int dma_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct runtime_data *prtd = runtime->private_data;

	pr_debug("Entered %s\n", __func__);

	if (!prtd)
		pr_debug("dma_close called with prtd == NULL\n");

	kfree(prtd);

	return 0;
}

/*
 * mmap callback: map the preallocated write-combined DMA buffer into
 * userspace.
 */
static int dma_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	pr_debug("Entered %s\n", __func__);

	return dma_mmap_writecombine(substream->pcm->card->dev, vma,
				     runtime->dma_area,
				     runtime->dma_addr,
				     runtime->dma_bytes);
}

static struct snd_pcm_ops dma_ops = {
	.open		= dma_open,
	.close		= dma_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= dma_hw_params,
	.hw_free	= dma_hw_free,
	.prepare	= dma_prepare,
	.trigger	= dma_trigger,
	.pointer	= dma_pointer,
	.mmap		= dma_mmap,
};

/*
 * Allocate one write-combined DMA buffer of buffer_bytes_max for the
 * given stream direction and attach it to the substream.
 */
static int preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;
	size_t size = dma_hardware.buffer_bytes_max;

	pr_debug("Entered %s\n", __func__);

	buf->dev.type = SNDRV_DMA_TYPE_DEV;
	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;
	buf->area = dma_alloc_writecombine(pcm->card->dev, size,
					   &buf->addr, GFP_KERNEL);
	if (!buf->area)
		return -ENOMEM;
	buf->bytes = size;
	return 0;
}

/* Free the preallocated buffers of both stream directions, if present. */
static void dma_free_dma_buffers(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;
	int stream;

	pr_debug("Entered %s\n", __func__);

	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (!substream)
			continue;

		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;

		dma_free_writecombine(pcm->card->dev, buf->bytes,
				      buf->area, buf->addr);
		buf->area = NULL;
	}
}

static u64 dma_mask = DMA_BIT_MASK(32);

/*
 * pcm_new callback: set up DMA masks on the card device and preallocate
 * buffers for whichever stream directions the PCM exposes.
 */
static int dma_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret = 0;

	pr_debug("Entered %s\n", __func__);

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &dma_mask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_PLAYBACK);
		if (ret)
			goto out;
	}

	if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
		ret = preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_CAPTURE);
		if (ret)
			goto out;
	}
out:
	return ret;
}

static struct snd_soc_platform_driver samsung_asoc_platform = {
	.ops		= &dma_ops,
	.pcm_new	= dma_new,
	.pcm_free	= dma_free_dma_buffers,
};

int samsung_asoc_dma_platform_register(struct device *dev)
{
	return snd_soc_register_platform(dev, &samsung_asoc_platform);
}
EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_register);

void samsung_asoc_dma_platform_unregister(struct device *dev)
{
	snd_soc_unregister_platform(dev);
}
EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_unregister);

MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_DESCRIPTION("Samsung ASoC DMA Driver");
MODULE_LICENSE("GPL");
gpl-2.0
openedev/streak_4.05_kernel
drivers/sh/intc.c
854
34023
/* * Shared interrupt handling code for IPR and INTC2 types of IRQs. * * Copyright (C) 2007, 2008 Magnus Damm * Copyright (C) 2009, 2010 Paul Mundt * * Based on intc2.c and ipr.c * * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi * Copyright (C) 2000 Kazumoto Kojima * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp> * Copyright (C) 2005, 2006 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/sh_intc.h> #include <linux/sysdev.h> #include <linux/list.h> #include <linux/topology.h> #include <linux/bitmap.h> #include <linux/cpumask.h> #include <asm/sizes.h> #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ ((addr_e) << 16) | ((addr_d << 24))) #define _INTC_SHIFT(h) (h & 0x1f) #define _INTC_WIDTH(h) ((h >> 5) & 0xf) #define _INTC_FN(h) ((h >> 9) & 0xf) #define _INTC_MODE(h) ((h >> 13) & 0x7) #define _INTC_ADDR_E(h) ((h >> 16) & 0xff) #define _INTC_ADDR_D(h) ((h >> 24) & 0xff) struct intc_handle_int { unsigned int irq; unsigned long handle; }; struct intc_window { phys_addr_t phys; void __iomem *virt; unsigned long size; }; struct intc_desc_int { struct list_head list; struct sys_device sysdev; pm_message_t state; unsigned long *reg; #ifdef CONFIG_SMP unsigned long *smp; #endif unsigned int nr_reg; struct intc_handle_int *prio; unsigned int nr_prio; struct intc_handle_int *sense; unsigned int nr_sense; struct intc_window *window; unsigned int nr_windows; struct irq_chip chip; }; static LIST_HEAD(intc_list); /* * The intc_irq_map provides a global map of bound IRQ vectors for a * given 
platform. Allocation of IRQs are either static through the CPU * vector map, or dynamic in the case of board mux vectors or MSI. * * As this is a central point for all IRQ controllers on the system, * each of the available sources are mapped out here. This combined with * sparseirq makes it quite trivial to keep the vector map tightly packed * when dynamically creating IRQs, as well as tying in to otherwise * unused irq_desc positions in the sparse array. */ static DECLARE_BITMAP(intc_irq_map, NR_IRQS); static DEFINE_SPINLOCK(vector_lock); #ifdef CONFIG_SMP #define IS_SMP(x) x.smp #define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c)) #define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1) #else #define IS_SMP(x) 0 #define INTC_REG(d, x, c) (d->reg[(x)]) #define SMP_NR(d, x) 1 #endif static unsigned int intc_prio_level[NR_IRQS]; /* for now */ static unsigned int default_prio_level = 2; /* 2 - 16 */ static unsigned long ack_handle[NR_IRQS]; #ifdef CONFIG_INTC_BALANCING static unsigned long dist_handle[NR_IRQS]; #endif static inline struct intc_desc_int *get_intc_desc(unsigned int irq) { struct irq_chip *chip = get_irq_chip(irq); return container_of(chip, struct intc_desc_int, chip); } static unsigned long intc_phys_to_virt(struct intc_desc_int *d, unsigned long address) { struct intc_window *window; int k; /* scan through physical windows and convert address */ for (k = 0; k < d->nr_windows; k++) { window = d->window + k; if (address < window->phys) continue; if (address >= (window->phys + window->size)) continue; address -= window->phys; address += (unsigned long)window->virt; return address; } /* no windows defined, register must be 1:1 mapped virt:phys */ return address; } static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address) { unsigned int k; address = intc_phys_to_virt(d, address); for (k = 0; k < d->nr_reg; k++) { if (d->reg[k] == address) return k; } BUG(); return 0; } static inline unsigned int 
set_field(unsigned int value, unsigned int field_value, unsigned int handle) { unsigned int width = _INTC_WIDTH(handle); unsigned int shift = _INTC_SHIFT(handle); value &= ~(((1 << width) - 1) << shift); value |= field_value << shift; return value; } static void write_8(unsigned long addr, unsigned long h, unsigned long data) { __raw_writeb(set_field(0, data, h), addr); (void)__raw_readb(addr); /* Defeat write posting */ } static void write_16(unsigned long addr, unsigned long h, unsigned long data) { __raw_writew(set_field(0, data, h), addr); (void)__raw_readw(addr); /* Defeat write posting */ } static void write_32(unsigned long addr, unsigned long h, unsigned long data) { __raw_writel(set_field(0, data, h), addr); (void)__raw_readl(addr); /* Defeat write posting */ } static void modify_8(unsigned long addr, unsigned long h, unsigned long data) { unsigned long flags; local_irq_save(flags); __raw_writeb(set_field(__raw_readb(addr), data, h), addr); (void)__raw_readb(addr); /* Defeat write posting */ local_irq_restore(flags); } static void modify_16(unsigned long addr, unsigned long h, unsigned long data) { unsigned long flags; local_irq_save(flags); __raw_writew(set_field(__raw_readw(addr), data, h), addr); (void)__raw_readw(addr); /* Defeat write posting */ local_irq_restore(flags); } static void modify_32(unsigned long addr, unsigned long h, unsigned long data) { unsigned long flags; local_irq_save(flags); __raw_writel(set_field(__raw_readl(addr), data, h), addr); (void)__raw_readl(addr); /* Defeat write posting */ local_irq_restore(flags); } enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 }; static void (*intc_reg_fns[])(unsigned long addr, unsigned long h, unsigned long data) = { [REG_FN_WRITE_BASE + 0] = write_8, [REG_FN_WRITE_BASE + 1] = write_16, [REG_FN_WRITE_BASE + 3] = write_32, [REG_FN_MODIFY_BASE + 0] = modify_8, [REG_FN_MODIFY_BASE + 1] = modify_16, [REG_FN_MODIFY_BASE + 3] = modify_32, }; enum { MODE_ENABLE_REG = 0, /* Bit(s) set 
-> interrupt enabled */ MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */ MODE_DUAL_REG, /* Two registers, set bit to enable / disable */ MODE_PRIO_REG, /* Priority value written to enable interrupt */ MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */ }; static void intc_mode_field(unsigned long addr, unsigned long handle, void (*fn)(unsigned long, unsigned long, unsigned long), unsigned int irq) { fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1)); } static void intc_mode_zero(unsigned long addr, unsigned long handle, void (*fn)(unsigned long, unsigned long, unsigned long), unsigned int irq) { fn(addr, handle, 0); } static void intc_mode_prio(unsigned long addr, unsigned long handle, void (*fn)(unsigned long, unsigned long, unsigned long), unsigned int irq) { fn(addr, handle, intc_prio_level[irq]); } static void (*intc_enable_fns[])(unsigned long addr, unsigned long handle, void (*fn)(unsigned long, unsigned long, unsigned long), unsigned int irq) = { [MODE_ENABLE_REG] = intc_mode_field, [MODE_MASK_REG] = intc_mode_zero, [MODE_DUAL_REG] = intc_mode_field, [MODE_PRIO_REG] = intc_mode_prio, [MODE_PCLR_REG] = intc_mode_prio, }; static void (*intc_disable_fns[])(unsigned long addr, unsigned long handle, void (*fn)(unsigned long, unsigned long, unsigned long), unsigned int irq) = { [MODE_ENABLE_REG] = intc_mode_zero, [MODE_MASK_REG] = intc_mode_field, [MODE_DUAL_REG] = intc_mode_field, [MODE_PRIO_REG] = intc_mode_zero, [MODE_PCLR_REG] = intc_mode_field, }; #ifdef CONFIG_INTC_BALANCING static inline void intc_balancing_enable(unsigned int irq) { struct intc_desc_int *d = get_intc_desc(irq); unsigned long handle = dist_handle[irq]; unsigned long addr; if (irq_balancing_disabled(irq) || !handle) return; addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); intc_reg_fns[_INTC_FN(handle)](addr, handle, 1); } static inline void intc_balancing_disable(unsigned int irq) { struct intc_desc_int *d = get_intc_desc(irq); unsigned long handle = dist_handle[irq]; unsigned 
long addr; if (irq_balancing_disabled(irq) || !handle) return; addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); intc_reg_fns[_INTC_FN(handle)](addr, handle, 0); } static unsigned int intc_dist_data(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id) { struct intc_mask_reg *mr = desc->hw.mask_regs; unsigned int i, j, fn, mode; unsigned long reg_e, reg_d; for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) { mr = desc->hw.mask_regs + i; /* * Skip this entry if there's no auto-distribution * register associated with it. */ if (!mr->dist_reg) continue; for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { if (mr->enum_ids[j] != enum_id) continue; fn = REG_FN_MODIFY_BASE; mode = MODE_ENABLE_REG; reg_e = mr->dist_reg; reg_d = mr->dist_reg; fn += (mr->reg_width >> 3) - 1; return _INTC_MK(fn, mode, intc_get_reg(d, reg_e), intc_get_reg(d, reg_d), 1, (mr->reg_width - 1) - j); } } /* * It's possible we've gotten here with no distribution options * available for the IRQ in question, so we just skip over those. 
*/ return 0; } #else static inline void intc_balancing_enable(unsigned int irq) { } static inline void intc_balancing_disable(unsigned int irq) { } #endif static inline void _intc_enable(unsigned int irq, unsigned long handle) { struct intc_desc_int *d = get_intc_desc(irq); unsigned long addr; unsigned int cpu; for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { #ifdef CONFIG_SMP if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity)) continue; #endif addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ [_INTC_FN(handle)], irq); } intc_balancing_enable(irq); } static void intc_enable(unsigned int irq) { _intc_enable(irq, (unsigned long)get_irq_chip_data(irq)); } static void intc_disable(unsigned int irq) { struct intc_desc_int *d = get_intc_desc(irq); unsigned long handle = (unsigned long)get_irq_chip_data(irq); unsigned long addr; unsigned int cpu; intc_balancing_disable(irq); for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { #ifdef CONFIG_SMP if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity)) continue; #endif addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\ [_INTC_FN(handle)], irq); } } static void (*intc_enable_noprio_fns[])(unsigned long addr, unsigned long handle, void (*fn)(unsigned long, unsigned long, unsigned long), unsigned int irq) = { [MODE_ENABLE_REG] = intc_mode_field, [MODE_MASK_REG] = intc_mode_zero, [MODE_DUAL_REG] = intc_mode_field, [MODE_PRIO_REG] = intc_mode_field, [MODE_PCLR_REG] = intc_mode_field, }; static void intc_enable_disable(struct intc_desc_int *d, unsigned long handle, int do_enable) { unsigned long addr; unsigned int cpu; void (*fn)(unsigned long, unsigned long, void (*)(unsigned long, unsigned long, unsigned long), unsigned int); if (do_enable) { for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); fn = 
intc_enable_noprio_fns[_INTC_MODE(handle)]; fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0); } } else { for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); fn = intc_disable_fns[_INTC_MODE(handle)]; fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0); } } } static int intc_set_wake(unsigned int irq, unsigned int on) { return 0; /* allow wakeup, but setup hardware in intc_suspend() */ } #ifdef CONFIG_SMP /* * This is held with the irq desc lock held, so we don't require any * additional locking here at the intc desc level. The affinity mask is * later tested in the enable/disable paths. */ static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask) { if (!cpumask_intersects(cpumask, cpu_online_mask)) return -1; cpumask_copy(irq_to_desc(irq)->affinity, cpumask); return 0; } #endif static void intc_mask_ack(unsigned int irq) { struct intc_desc_int *d = get_intc_desc(irq); unsigned long handle = ack_handle[irq]; unsigned long addr; intc_disable(irq); /* read register and write zero only to the associated bit */ if (handle) { addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); switch (_INTC_FN(handle)) { case REG_FN_MODIFY_BASE + 0: /* 8bit */ __raw_readb(addr); __raw_writeb(0xff ^ set_field(0, 1, handle), addr); break; case REG_FN_MODIFY_BASE + 1: /* 16bit */ __raw_readw(addr); __raw_writew(0xffff ^ set_field(0, 1, handle), addr); break; case REG_FN_MODIFY_BASE + 3: /* 32bit */ __raw_readl(addr); __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr); break; default: BUG(); break; } } } static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp, unsigned int nr_hp, unsigned int irq) { int i; /* * this doesn't scale well, but... * * this function should only be used for cerain uncommon * operations such as intc_set_priority() and intc_set_sense() * and in those rare cases performance doesn't matter that much. * keeping the memory footprint low is more important. 
* * one rather simple way to speed this up and still keep the * memory footprint down is to make sure the array is sorted * and then perform a bisect to lookup the irq. */ for (i = 0; i < nr_hp; i++) { if ((hp + i)->irq != irq) continue; return hp + i; } return NULL; } int intc_set_priority(unsigned int irq, unsigned int prio) { struct intc_desc_int *d = get_intc_desc(irq); struct intc_handle_int *ihp; if (!intc_prio_level[irq] || prio <= 1) return -EINVAL; ihp = intc_find_irq(d->prio, d->nr_prio, irq); if (ihp) { if (prio >= (1 << _INTC_WIDTH(ihp->handle))) return -EINVAL; intc_prio_level[irq] = prio; /* * only set secondary masking method directly * primary masking method is using intc_prio_level[irq] * priority level will be set during next enable() */ if (_INTC_FN(ihp->handle) != REG_FN_ERR) _intc_enable(irq, ihp->handle); } return 0; } #define VALID(x) (x | 0x80) static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = { [IRQ_TYPE_EDGE_FALLING] = VALID(0), [IRQ_TYPE_EDGE_RISING] = VALID(1), [IRQ_TYPE_LEVEL_LOW] = VALID(2), /* SH7706, SH7707 and SH7709 do not support high level triggered */ #if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \ !defined(CONFIG_CPU_SUBTYPE_SH7707) && \ !defined(CONFIG_CPU_SUBTYPE_SH7709) [IRQ_TYPE_LEVEL_HIGH] = VALID(3), #endif }; static int intc_set_sense(unsigned int irq, unsigned int type) { struct intc_desc_int *d = get_intc_desc(irq); unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK]; struct intc_handle_int *ihp; unsigned long addr; if (!value) return -EINVAL; ihp = intc_find_irq(d->sense, d->nr_sense, irq); if (ihp) { addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0); intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value); } return 0; } static intc_enum __init intc_grp_id(struct intc_desc *desc, intc_enum enum_id) { struct intc_group *g = desc->hw.groups; unsigned int i, j; for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) { g = desc->hw.groups + i; for (j = 0; g->enum_ids[j]; j++) { if 
(g->enum_ids[j] != enum_id) continue; return g->enum_id; } } return 0; } static unsigned int __init _intc_mask_data(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id, unsigned int *reg_idx, unsigned int *fld_idx) { struct intc_mask_reg *mr = desc->hw.mask_regs; unsigned int fn, mode; unsigned long reg_e, reg_d; while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) { mr = desc->hw.mask_regs + *reg_idx; for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) { if (mr->enum_ids[*fld_idx] != enum_id) continue; if (mr->set_reg && mr->clr_reg) { fn = REG_FN_WRITE_BASE; mode = MODE_DUAL_REG; reg_e = mr->clr_reg; reg_d = mr->set_reg; } else { fn = REG_FN_MODIFY_BASE; if (mr->set_reg) { mode = MODE_ENABLE_REG; reg_e = mr->set_reg; reg_d = mr->set_reg; } else { mode = MODE_MASK_REG; reg_e = mr->clr_reg; reg_d = mr->clr_reg; } } fn += (mr->reg_width >> 3) - 1; return _INTC_MK(fn, mode, intc_get_reg(d, reg_e), intc_get_reg(d, reg_d), 1, (mr->reg_width - 1) - *fld_idx); } *fld_idx = 0; (*reg_idx)++; } return 0; } static unsigned int __init intc_mask_data(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id, int do_grps) { unsigned int i = 0; unsigned int j = 0; unsigned int ret; ret = _intc_mask_data(desc, d, enum_id, &i, &j); if (ret) return ret; if (do_grps) return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0); return 0; } static unsigned int __init _intc_prio_data(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id, unsigned int *reg_idx, unsigned int *fld_idx) { struct intc_prio_reg *pr = desc->hw.prio_regs; unsigned int fn, n, mode, bit; unsigned long reg_e, reg_d; while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) { pr = desc->hw.prio_regs + *reg_idx; for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) { if (pr->enum_ids[*fld_idx] != enum_id) continue; if (pr->set_reg && pr->clr_reg) { fn = REG_FN_WRITE_BASE; mode = MODE_PCLR_REG; reg_e = pr->set_reg; reg_d = pr->clr_reg; } else { fn = 
REG_FN_MODIFY_BASE; mode = MODE_PRIO_REG; if (!pr->set_reg) BUG(); reg_e = pr->set_reg; reg_d = pr->set_reg; } fn += (pr->reg_width >> 3) - 1; n = *fld_idx + 1; BUG_ON(n * pr->field_width > pr->reg_width); bit = pr->reg_width - (n * pr->field_width); return _INTC_MK(fn, mode, intc_get_reg(d, reg_e), intc_get_reg(d, reg_d), pr->field_width, bit); } *fld_idx = 0; (*reg_idx)++; } return 0; } static unsigned int __init intc_prio_data(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id, int do_grps) { unsigned int i = 0; unsigned int j = 0; unsigned int ret; ret = _intc_prio_data(desc, d, enum_id, &i, &j); if (ret) return ret; if (do_grps) return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0); return 0; } static void __init intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id, int enable) { unsigned int i, j, data; /* go through and enable/disable all mask bits */ i = j = 0; do { data = _intc_mask_data(desc, d, enum_id, &i, &j); if (data) intc_enable_disable(d, data, enable); j++; } while (data); /* go through and enable/disable all priority fields */ i = j = 0; do { data = _intc_prio_data(desc, d, enum_id, &i, &j); if (data) intc_enable_disable(d, data, enable); j++; } while (data); } static unsigned int __init intc_ack_data(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id) { struct intc_mask_reg *mr = desc->hw.ack_regs; unsigned int i, j, fn, mode; unsigned long reg_e, reg_d; for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) { mr = desc->hw.ack_regs + i; for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { if (mr->enum_ids[j] != enum_id) continue; fn = REG_FN_MODIFY_BASE; mode = MODE_ENABLE_REG; reg_e = mr->set_reg; reg_d = mr->set_reg; fn += (mr->reg_width >> 3) - 1; return _INTC_MK(fn, mode, intc_get_reg(d, reg_e), intc_get_reg(d, reg_d), 1, (mr->reg_width - 1) - j); } } return 0; } static unsigned int __init intc_sense_data(struct intc_desc *desc, struct intc_desc_int *d, 
intc_enum enum_id) { struct intc_sense_reg *sr = desc->hw.sense_regs; unsigned int i, j, fn, bit; for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) { sr = desc->hw.sense_regs + i; for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) { if (sr->enum_ids[j] != enum_id) continue; fn = REG_FN_MODIFY_BASE; fn += (sr->reg_width >> 3) - 1; BUG_ON((j + 1) * sr->field_width > sr->reg_width); bit = sr->reg_width - ((j + 1) * sr->field_width); return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg), 0, sr->field_width, bit); } } return 0; } static void __init intc_register_irq(struct intc_desc *desc, struct intc_desc_int *d, intc_enum enum_id, unsigned int irq) { struct intc_handle_int *hp; unsigned int data[2], primary; /* * Register the IRQ position with the global IRQ map */ set_bit(irq, intc_irq_map); /* * Prefer single interrupt source bitmap over other combinations: * * 1. bitmap, single interrupt source * 2. priority, single interrupt source * 3. bitmap, multiple interrupt sources (groups) * 4. priority, multiple interrupt sources (groups) */ data[0] = intc_mask_data(desc, d, enum_id, 0); data[1] = intc_prio_data(desc, d, enum_id, 0); primary = 0; if (!data[0] && data[1]) primary = 1; if (!data[0] && !data[1]) pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n", irq, irq2evt(irq)); data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1); data[1] = data[1] ? 
data[1] : intc_prio_data(desc, d, enum_id, 1); if (!data[primary]) primary ^= 1; BUG_ON(!data[primary]); /* must have primary masking method */ disable_irq_nosync(irq); set_irq_chip_and_handler_name(irq, &d->chip, handle_level_irq, "level"); set_irq_chip_data(irq, (void *)data[primary]); /* * set priority level * - this needs to be at least 2 for 5-bit priorities on 7780 */ intc_prio_level[irq] = default_prio_level; /* enable secondary masking method if present */ if (data[!primary]) _intc_enable(irq, data[!primary]); /* add irq to d->prio list if priority is available */ if (data[1]) { hp = d->prio + d->nr_prio; hp->irq = irq; hp->handle = data[1]; if (primary) { /* * only secondary priority should access registers, so * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority() */ hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0); hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0); } d->nr_prio++; } /* add irq to d->sense list if sense is available */ data[0] = intc_sense_data(desc, d, enum_id); if (data[0]) { (d->sense + d->nr_sense)->irq = irq; (d->sense + d->nr_sense)->handle = data[0]; d->nr_sense++; } /* irq should be disabled by default */ d->chip.mask(irq); if (desc->hw.ack_regs) ack_handle[irq] = intc_ack_data(desc, d, enum_id); #ifdef CONFIG_INTC_BALANCING if (desc->hw.mask_regs) dist_handle[irq] = intc_dist_data(desc, d, enum_id); #endif #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */ #endif } static unsigned int __init save_reg(struct intc_desc_int *d, unsigned int cnt, unsigned long value, unsigned int smp) { if (value) { value = intc_phys_to_virt(d, value); d->reg[cnt] = value; #ifdef CONFIG_SMP d->smp[cnt] = smp; #endif return 1; } return 0; } static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) { generic_handle_irq((unsigned int)get_irq_data(irq)); } int __init register_intc_controller(struct intc_desc *desc) { unsigned int i, k, smp; struct intc_hw_desc *hw = &desc->hw; struct intc_desc_int *d; struct 
resource *res; pr_info("Registered controller '%s' with %u IRQs\n", desc->name, hw->nr_vectors); d = kzalloc(sizeof(*d), GFP_NOWAIT); if (!d) goto err0; INIT_LIST_HEAD(&d->list); list_add(&d->list, &intc_list); if (desc->num_resources) { d->nr_windows = desc->num_resources; d->window = kzalloc(d->nr_windows * sizeof(*d->window), GFP_NOWAIT); if (!d->window) goto err1; for (k = 0; k < d->nr_windows; k++) { res = desc->resource + k; WARN_ON(resource_type(res) != IORESOURCE_MEM); d->window[k].phys = res->start; d->window[k].size = resource_size(res); d->window[k].virt = ioremap_nocache(res->start, resource_size(res)); if (!d->window[k].virt) goto err2; } } d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0; #ifdef CONFIG_INTC_BALANCING if (d->nr_reg) d->nr_reg += hw->nr_mask_regs; #endif d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0; d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0; d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0; d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); if (!d->reg) goto err2; #ifdef CONFIG_SMP d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT); if (!d->smp) goto err3; #endif k = 0; if (hw->mask_regs) { for (i = 0; i < hw->nr_mask_regs; i++) { smp = IS_SMP(hw->mask_regs[i]); k += save_reg(d, k, hw->mask_regs[i].set_reg, smp); k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp); #ifdef CONFIG_INTC_BALANCING k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0); #endif } } if (hw->prio_regs) { d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio), GFP_NOWAIT); if (!d->prio) goto err4; for (i = 0; i < hw->nr_prio_regs; i++) { smp = IS_SMP(hw->prio_regs[i]); k += save_reg(d, k, hw->prio_regs[i].set_reg, smp); k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp); } } if (hw->sense_regs) { d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense), GFP_NOWAIT); if (!d->sense) goto err5; for (i = 0; i < hw->nr_sense_regs; i++) k += save_reg(d, k, hw->sense_regs[i].reg, 0); } d->chip.name = desc->name; d->chip.mask = intc_disable; 
d->chip.unmask = intc_enable; d->chip.mask_ack = intc_disable; d->chip.enable = intc_enable; d->chip.disable = intc_disable; d->chip.shutdown = intc_disable; d->chip.set_type = intc_set_sense; d->chip.set_wake = intc_set_wake; #ifdef CONFIG_SMP d->chip.set_affinity = intc_set_affinity; #endif if (hw->ack_regs) { for (i = 0; i < hw->nr_ack_regs; i++) k += save_reg(d, k, hw->ack_regs[i].set_reg, 0); d->chip.mask_ack = intc_mask_ack; } /* disable bits matching force_disable before registering irqs */ if (desc->force_disable) intc_enable_disable_enum(desc, d, desc->force_disable, 0); /* disable bits matching force_enable before registering irqs */ if (desc->force_enable) intc_enable_disable_enum(desc, d, desc->force_enable, 0); BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ /* register the vectors one by one */ for (i = 0; i < hw->nr_vectors; i++) { struct intc_vect *vect = hw->vectors + i; unsigned int irq = evt2irq(vect->vect); struct irq_desc *irq_desc; if (!vect->enum_id) continue; irq_desc = irq_to_desc_alloc_node(irq, numa_node_id()); if (unlikely(!irq_desc)) { pr_err("can't get irq_desc for %d\n", irq); continue; } intc_register_irq(desc, d, vect->enum_id, irq); for (k = i + 1; k < hw->nr_vectors; k++) { struct intc_vect *vect2 = hw->vectors + k; unsigned int irq2 = evt2irq(vect2->vect); if (vect->enum_id != vect2->enum_id) continue; /* * In the case of multi-evt handling and sparse * IRQ support, each vector still needs to have * its own backing irq_desc. 
*/ irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id()); if (unlikely(!irq_desc)) { pr_err("can't get irq_desc for %d\n", irq2); continue; } vect2->enum_id = 0; /* redirect this interrupts to the first one */ set_irq_chip(irq2, &dummy_irq_chip); set_irq_chained_handler(irq2, intc_redirect_irq); set_irq_data(irq2, (void *)irq); } } /* enable bits matching force_enable after registering irqs */ if (desc->force_enable) intc_enable_disable_enum(desc, d, desc->force_enable, 1); return 0; err5: kfree(d->prio); err4: #ifdef CONFIG_SMP kfree(d->smp); err3: #endif kfree(d->reg); err2: for (k = 0; k < d->nr_windows; k++) if (d->window[k].virt) iounmap(d->window[k].virt); kfree(d->window); err1: kfree(d); err0: pr_err("unable to allocate INTC memory\n"); return -ENOMEM; } #ifdef CONFIG_INTC_USERIMASK static void __iomem *uimask; int register_intc_userimask(unsigned long addr) { if (unlikely(uimask)) return -EBUSY; uimask = ioremap_nocache(addr, SZ_4K); if (unlikely(!uimask)) return -ENOMEM; pr_info("userimask support registered for levels 0 -> %d\n", default_prio_level - 1); return 0; } static ssize_t show_intc_userimask(struct sysdev_class *cls, struct sysdev_class_attribute *attr, char *buf) { return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf); } static ssize_t store_intc_userimask(struct sysdev_class *cls, struct sysdev_class_attribute *attr, const char *buf, size_t count) { unsigned long level; level = simple_strtoul(buf, NULL, 10); /* * Minimal acceptable IRQ levels are in the 2 - 16 range, but * these are chomped so as to not interfere with normal IRQs. * * Level 1 is a special case on some CPUs in that it's not * directly settable, but given that USERIMASK cuts off below a * certain level, we don't care about this limitation here. * Level 0 on the other hand equates to user masking disabled. * * We use default_prio_level as a cut off so that only special * case opt-in IRQs can be mangled. 
/*
 * sysdev suspend/resume hook for one INTC controller instance.
 *
 * The same callback services both directions: intc_resume() re-enters it
 * with PMSG_ON.  The previous power event is remembered in d->state so
 * that the PM_EVENT_ON path only reprograms registers when coming back
 * from a freeze.
 */
static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
	struct intc_desc_int *d;
	struct irq_desc *desc;
	int irq;

	/* get intc controller associated with this sysdev */
	d = container_of(dev, struct intc_desc_int, sysdev);

	switch (state.event) {
	case PM_EVENT_ON:
		/* Only restore if we are waking from a freeze transition. */
		if (d->state.event != PM_EVENT_FREEZE)
			break;
		for_each_irq_desc(irq, desc) {
			/*
			 * Redirected (multi-evt alias) interrupts have no
			 * hardware state of their own; skip them.
			 */
			if (desc->handle_irq == intc_redirect_irq)
				continue;
			/* Only touch interrupts owned by this controller. */
			if (desc->chip != &d->chip)
				continue;
			/*
			 * Re-sync the hardware mask with the recorded
			 * software enabled/disabled status.
			 */
			if (desc->status & IRQ_DISABLED)
				intc_disable(irq);
			else
				intc_enable(irq);
		}
		break;
	case PM_EVENT_FREEZE:
		/* nothing has to be done */
		break;
	case PM_EVENT_SUSPEND:
		/* enable wakeup irqs belonging to this intc controller */
		for_each_irq_desc(irq, desc) {
			if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
				intc_enable(irq);
		}
		break;
	}

	/* Remember this event for the next transition (see PM_EVENT_ON). */
	d->state = state;

	return 0;
}
/*
 * Allocate a dynamic IRQ number, preferring @irq_want.
 *
 * The intc_irq_map bitmap (protected by vector_lock) tracks which
 * vectors are in use.  If the wanted slot is taken, fall back to the
 * first free bit.  On success the irq_desc is allocated (and migrated)
 * on @node and the irq is initialized for dynamic use.
 *
 * Returns the allocated irq number, or 0 on failure.
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	unsigned int irq = 0, new;
	unsigned long flags;
	struct irq_desc *desc;

	spin_lock_irqsave(&vector_lock, flags);

	/*
	 * First try the wanted IRQ
	 */
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
		new = irq_want;
	} else {
		/* .. then fall back to scanning. */
		new = find_first_zero_bit(intc_irq_map, nr_irqs);
		if (unlikely(new == nr_irqs))
			goto out_unlock;

		__set_bit(new, intc_irq_map);
	}

	desc = irq_to_desc_alloc_node(new, node);
	if (unlikely(!desc)) {
		/*
		 * NOTE(review): the bit set above is not cleared on this
		 * failure path, so the vector appears permanently busy —
		 * confirm whether this leak is intentional.
		 */
		pr_err("can't get irq_desc for %d\n", new);
		goto out_unlock;
	}

	desc = move_irq_desc(desc, node);
	irq = new;

 out_unlock:
	spin_unlock_irqrestore(&vector_lock, flags);

	/* Finish setup outside the lock; irq == 0 means allocation failed. */
	if (irq > 0) {
		dynamic_irq_init(irq);
#ifdef CONFIG_ARM
		set_irq_flags(irq, IRQF_VALID);	/* Enable IRQ on ARM systems */
#endif
	}

	return irq;
}
gpl-2.0
croniccorey/cronmod-kernel
arch/blackfin/mach-bf537/boards/pnav10.c
854
12931
/*
 * Memory windows and interrupt lines for the CompactFlash/PCMCIA
 * bridge; consumed by the "bfin_cf_pcmcia" platform device below.
 */
static struct resource bfin_pcmcia_cf_resources[] = {
	{
		.start = 0x20310000, /* IO PORT */
		.end = 0x20312000,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 0x20311000, /* Attribute Memory */
		.end = 0x20311FFF,
		.flags = IORESOURCE_MEM,
	}, {
		/* Card interrupt, active-low level triggered. */
		.start = IRQ_PF4,
		.end = IRQ_PF4,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	}, {
		.start = 6, /* Card Detect PF6 */
		.end = 6,
		.flags = IORESOURCE_IRQ,
	},
};
/* MDIO bus device for the on-chip Blackfin Ethernet MAC. */
static struct platform_device bfin_mii_bus = {
	.name = "bfin_mii_bus",
};

/*
 * On-chip Ethernet MAC; the MII bus device above is handed to the
 * driver through platform_data.
 */
static struct platform_device bfin_mac_device = {
	.name = "bfin_mac",
	.dev.platform_data = &bfin_mii_bus,
};
/*
 * Board-specific configuration for the AD7877 touchscreen controller,
 * passed as platform_data through the SPI board info table below.
 */
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
	.model			= 7877,
	.vref_delay_usecs	= 50,	/* internal, no capacitor */
	/* Touch panel plate resistances, in ohms. */
	.x_plate_ohms		= 419,
	.y_plate_ohms		= 486,
	/* Reported pressure range. */
	.pressure_max		= 1000,
	.pressure_min		= 0,
	.stopacq_polarity	= 1,
	/* Conversion/acquisition timing parameters (device units). */
	.first_conversion_delay	= 3,
	.acquisition_time	= 1,
	.averaging		= 1,
	.pen_down_acc_interval	= 1,
};
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
	.num_chipselect = 8,
	.enable_dma = 1,  /* master has the ability to do dma transfer */
	/* Zero-terminated list of peripheral pins claimed for SPI0. */
	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
/*
 * Zero-terminated UART0 peripheral pin list; handed to the bfin-uart
 * driver via the device's platform_data.
 */
unsigned short bfin_uart0_peripherals[] = {
	P_UART0_TX, P_UART0_RX, 0
};
/* SIR (serial infrared — presumably IrDA, TODO confirm) port on UART1. */
static struct platform_device bfin_sir1_device = {
	.name = "bfin_sir",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
	.resource = bfin_sir1_resources,
};
/*
 * Provide a MAC address for the on-chip Ethernet MAC.  This board has
 * no persistent MAC storage, so a random address is generated on every
 * boot and a warning is logged.
 */
void bfin_get_ether_addr(char *addr)
{
	random_ether_addr(addr);
	printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n",
		__FILE__, __func__);
}
EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
ahnchan2/linux
drivers/spi/spi-st-ssc4.c
854
11774
/* Per-controller driver state. */
struct spi_st {
	/* SSC SPI Controller */
	void __iomem		*base;	/* mapped SSC register block */
	struct clk		*clk;	/* "ssc" functional clock */
	struct device		*dev;

	/* SSC SPI current transaction */
	const u8		*tx_ptr;	/* next byte to transmit (may be NULL) */
	u8			*rx_ptr;	/* next byte to receive (may be NULL) */
	u16			bytes_per_word;	/* 1 or 2, set per transfer */
	unsigned int		words_remaining;	/* words left in current transfer */
	unsigned int		baud;	/* actual baudrate achieved by divider */
	struct completion	done;	/* signalled by IRQ handler at end of transfer */
};
/* Read the RX FIFO */
static void ssc_read_rx_fifo(struct spi_st *spi_st)
{
	unsigned int count, i;
	uint32_t word = 0;

	/* Drain at most one FIFO's worth of words. */
	if (spi_st->words_remaining > FIFO_SIZE)
		count = FIFO_SIZE;
	else
		count = spi_st->words_remaining;

	for (i = 0; i < count; i++) {
		word = readl_relaxed(spi_st->base + SSC_RBUF);

		/* rx_ptr is NULL for transmit-only transfers: discard data. */
		if (spi_st->rx_ptr) {
			if (spi_st->bytes_per_word == 1) {
				*spi_st->rx_ptr++ = (uint8_t)word;
			} else {
				/* 16-bit words unpack high byte first. */
				*spi_st->rx_ptr++ = (word >> 8);
				*spi_st->rx_ptr++ = word & 0xff;
			}
		}
	}

	/*
	 * The running count is decremented here (not in the TX path):
	 * a word is only complete once its response has been read back.
	 */
	spi_st->words_remaining -= count;
}
/* Set SSC_CTL to 16 bits-per-word */ ctl = readl_relaxed(spi_st->base + SSC_CTL); writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL); readl_relaxed(spi_st->base + SSC_RBUF); } else { spi_st->bytes_per_word = 1; spi_st->words_remaining = t->len; } reinit_completion(&spi_st->done); /* Start transfer by writing to the TX FIFO */ ssc_write_tx_fifo(spi_st); writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN); /* Wait for transfer to complete */ wait_for_completion(&spi_st->done); /* Restore SSC_CTL if necessary */ if (ctl) writel_relaxed(ctl, spi_st->base + SSC_CTL); spi_finalize_current_transfer(spi->master); return t->len; } static void spi_st_cleanup(struct spi_device *spi) { int cs = spi->cs_gpio; if (gpio_is_valid(cs)) devm_gpio_free(&spi->dev, cs); } /* the spi->mode bits understood by this driver: */ #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH) static int spi_st_setup(struct spi_device *spi) { struct spi_st *spi_st = spi_master_get_devdata(spi->master); u32 spi_st_clk, sscbrg, var; u32 hz = spi->max_speed_hz; int cs = spi->cs_gpio; int ret; if (!hz) { dev_err(&spi->dev, "max_speed_hz unspecified\n"); return -EINVAL; } if (!gpio_is_valid(cs)) { dev_err(&spi->dev, "%d is not a valid gpio\n", cs); return -EINVAL; } if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) { dev_err(&spi->dev, "could not request gpio:%d\n", cs); return -EINVAL; } ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH); if (ret) return ret; spi_st_clk = clk_get_rate(spi_st->clk); /* Set SSC_BRF */ sscbrg = spi_st_clk / (2 * hz); if (sscbrg < 0x07 || sscbrg > BIT(16)) { dev_err(&spi->dev, "baudrate %d outside valid range %d\n", sscbrg, hz); return -EINVAL; } spi_st->baud = spi_st_clk / (2 * sscbrg); if (sscbrg == BIT(16)) /* 16-bit counter wraps */ sscbrg = 0x0; writel_relaxed(sscbrg, spi_st->base + SSC_BRG); dev_dbg(&spi->dev, "setting baudrate:target= %u hz, actual= %u hz, sscbrg= %u\n", hz, spi_st->baud, sscbrg); /* Set SSC_CTL and enable SSC 
*/ var = readl_relaxed(spi_st->base + SSC_CTL); var |= SSC_CTL_MS; if (spi->mode & SPI_CPOL) var |= SSC_CTL_PO; else var &= ~SSC_CTL_PO; if (spi->mode & SPI_CPHA) var |= SSC_CTL_PH; else var &= ~SSC_CTL_PH; if ((spi->mode & SPI_LSB_FIRST) == 0) var |= SSC_CTL_HB; else var &= ~SSC_CTL_HB; if (spi->mode & SPI_LOOP) var |= SSC_CTL_LPB; else var &= ~SSC_CTL_LPB; var &= ~SSC_CTL_DATA_WIDTH_MSK; var |= (spi->bits_per_word - 1); var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO; var |= SSC_CTL_EN; writel_relaxed(var, spi_st->base + SSC_CTL); /* Clear the status register */ readl_relaxed(spi_st->base + SSC_RBUF); return 0; } /* Interrupt fired when TX shift register becomes empty */ static irqreturn_t spi_st_irq(int irq, void *dev_id) { struct spi_st *spi_st = (struct spi_st *)dev_id; /* Read RX FIFO */ ssc_read_rx_fifo(spi_st); /* Fill TX FIFO */ if (spi_st->words_remaining) { ssc_write_tx_fifo(spi_st); } else { /* TX/RX complete */ writel_relaxed(0x0, spi_st->base + SSC_IEN); /* * read SSC_IEN to ensure that this bit is set * before re-enabling interrupt */ readl(spi_st->base + SSC_IEN); complete(&spi_st->done); } return IRQ_HANDLED; } static int spi_st_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct spi_master *master; struct resource *res; struct spi_st *spi_st; int irq, ret = 0; u32 var; master = spi_alloc_master(&pdev->dev, sizeof(*spi_st)); if (!master) return -ENOMEM; master->dev.of_node = np; master->mode_bits = MODEBITS; master->setup = spi_st_setup; master->cleanup = spi_st_cleanup; master->transfer_one = spi_st_transfer_one; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->auto_runtime_pm = true; master->bus_num = pdev->id; spi_st = spi_master_get_devdata(master); spi_st->clk = devm_clk_get(&pdev->dev, "ssc"); if (IS_ERR(spi_st->clk)) { dev_err(&pdev->dev, "Unable to request clock\n"); return PTR_ERR(spi_st->clk); } ret = spi_st_clk_enable(spi_st); if (ret) return ret; init_completion(&spi_st->done); 
/*
 * Platform-device teardown: disable the SSC clock (currently a no-op —
 * see spi_st_clk_disable) and put the pins into their sleep state.
 * The master itself was registered with devm and is released
 * automatically.
 */
static int spi_st_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_st *spi_st = spi_master_get_devdata(master);

	spi_st_clk_disable(spi_st);

	pinctrl_pm_select_sleep_state(&pdev->dev);

	return 0;
}
/*
 * System-sleep suspend: quiesce the SPI core queue first, then force
 * the device into its runtime-suspended state.
 */
static int spi_st_suspend(struct device *dev)
{
	int err;

	err = spi_master_suspend(dev_get_drvdata(dev));
	if (err)
		return err;

	return pm_runtime_force_suspend(dev);
}
gpl-2.0
ioz9/GT-N7000-2.35.y-samsung-update1
drivers/input/mouse/alps.c
2390
22775
/*
 * ALPS touchpad PS/2 mouse driver
 *
 * Copyright (c) 2003 Neil Brown <neilb@cse.unsw.edu.au>
 * Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com>
 * Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru>
 * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2009 Sebastian Kapfer <sebastian_kapfer@gmx.net>
 *
 * ALPS detection, tap switching and status querying info is taken from
 * tpconfig utility (by C. Scott Ananian and Bruce Kall).
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/libps2.h>

#include "psmouse.h"
#include "alps.h"

#undef DEBUG
#ifdef DEBUG
#define dbg(format, arg...) printk(KERN_INFO "alps.c: " format "\n", ## arg)
#else
#define dbg(format, arg...) do {} while (0)
#endif

/* Per-model quirk flags stored in alps_model_info.flags */
#define ALPS_OLDPROTO		0x01	/* old style input */
#define ALPS_DUALPOINT		0x02	/* touchpad has trackstick */
#define ALPS_PASS		0x04	/* device has a pass-through port */
#define ALPS_WHEEL		0x08	/* hardware wheel present */
#define ALPS_FW_BK_1		0x10	/* front & back buttons present */
#define ALPS_FW_BK_2		0x20	/* front & back buttons present */
#define ALPS_FOUR_BUTTONS	0x40	/* 4 direction button present */
#define ALPS_PS2_INTERLEAVED	0x80	/* 3-byte PS/2 packet interleaved with
					   6-byte ALPS packet */

/*
 * Known devices, keyed by the 3-byte "E7 report" signature.
 * mask0/byte0 validate the first byte of every 6-byte ALPS packet:
 * (packet[0] & mask0) must equal byte0 (see alps_is_valid_first_byte()).
 */
static const struct alps_model_info alps_model_data[] = {
	{ { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },	/* Toshiba Salellite Pro M10 */
	{ { 0x33, 0x02, 0x0a }, 0x88, 0xf8, ALPS_OLDPROTO },			/* UMAX-530T */
	{ { 0x53, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
	{ { 0x53, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
	{ { 0x60, 0x03, 0xc8 }, 0xf8, 0xf8, 0 },				/* HP ze1115 */
	{ { 0x63, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
	{ { 0x63, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
	{ { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, ALPS_FW_BK_2 },			/* Fujitsu Siemens S6010 */
	{ { 0x63, 0x02, 0x3c }, 0x8f, 0x8f, ALPS_WHEEL },			/* Toshiba Satellite S2400-103 */
	{ { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK_1 },			/* NEC Versa L320 */
	{ { 0x63, 0x02, 0x64 }, 0xf8, 0xf8, 0 },
	{ { 0x63, 0x03, 0xc8 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },	/* Dell Latitude D800 */
	{ { 0x73, 0x00, 0x0a }, 0xf8, 0xf8, ALPS_DUALPOINT },			/* ThinkPad R61 8918-5QG */
	{ { 0x73, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
	{ { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_FW_BK_2 },			/* Ahtec Laptop */
	{ { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },	/* XXX */
	{ { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
	{ { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT },	/* Dell Latitude D600 */
	/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
	{ { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
	{ { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },		/* Dell Vostro 1400 */
	{ { 0x52, 0x01, 0x14 }, 0xff, 0xff,
		ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },	/* Toshiba Tecra A11-11L */
};

/*
 * XXX - this entry is suspicious. First byte has zero lower nibble,
 * which is what a normal mouse would report. Also, the value 0x0e
 * isn't valid per PS/2 spec.
 * (Refers to the { 0x20, 0x02, 0x0e } signature in the table above.)
 */

/*
 * PS/2 packet format
 *
 * byte 0:  0    0 YSGN XSGN    1    M    R    L
 * byte 1:  X7   X6   X5   X4   X3   X2   X1   X0
 * byte 2:  Y7   Y6   Y5   Y4   Y3   Y2   Y1   Y0
 *
 * Note that the device never signals overflow condition.
 *
 * ALPS absolute Mode - new format
 *
 * byte 0:  1    ?    ?    ?    1    ?    ?    ?
 * byte 1:  0   x6   x5   x4   x3   x2   x1   x0
 * byte 2:  0  x10   x9   x8   x7    ?  fin  ges
 * byte 3:  0   y9   y8   y7    1    M    R    L
 * byte 4:  0   y6   y5   y4   y3   y2   y1   y0
 * byte 5:  0   z6   z5   z4   z3   z2   z1   z0
 *
 * Dualpoint device -- interleaved packet format
 *
 * byte 0:    1    1    0    0    1    1    1    1
 * byte 1:    0   x6   x5   x4   x3   x2   x1   x0
 * byte 2:    0  x10   x9   x8   x7    0  fin  ges
 * byte 3:    0    0 YSGN XSGN    1    1    1    1
 * byte 4:   X7   X6   X5   X4   X3   X2   X1   X0
 * byte 5:   Y7   Y6   Y5   Y4   Y3   Y2   Y1   Y0
 * byte 6:    0   y9   y8   y7    1    m    r    l
 * byte 7:    0   y6   y5   y4   y3   y2   y1   y0
 * byte 8:    0   z6   z5   z4   z3   z2   z1   z0
 *
 * CAPITALS = stick, miniscules = touchpad
 *
 * ?'s can have different meanings on different models,
 * such as wheel rotation, extra buttons, stick buttons
 * on a dualpoint, etc.
 */

/* Check a packet's first byte against the model's signature mask. */
static bool alps_is_valid_first_byte(const struct alps_model_info *model,
				     unsigned char data)
{
	return (data & model->mask0) == model->byte0;
}

/*
 * Report the left/right/middle buttons, routing each button to whichever
 * of the two input devices already has it pressed (so press and release
 * of a shared button go through the same device), and sync dev2.
 */
static void alps_report_buttons(struct psmouse *psmouse,
				struct input_dev *dev1, struct input_dev *dev2,
				int left, int right, int middle)
{
	struct input_dev *dev;

	/*
	 * If shared button has already been reported on the
	 * other device (dev2) then this event should be also
	 * sent through that device.
	 */
	dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
	input_report_key(dev, BTN_LEFT, left);

	dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
	input_report_key(dev, BTN_RIGHT, right);

	dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
	input_report_key(dev, BTN_MIDDLE, middle);

	/*
	 * Sync the _other_ device now, we'll do the first
	 * device later once we report the rest of the events.
	 */
	input_sync(dev2);
}

/*
 * Decode a complete 6-byte ALPS packet and emit input events:
 * absolute touchpad position/pressure on psmouse->dev and relative
 * trackstick motion (z == 127 marker) on priv->dev2.
 */
static void alps_process_packet(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	const struct alps_model_info *model = priv->i;
	unsigned char *packet = psmouse->packet;
	struct input_dev *dev = psmouse->dev;
	struct input_dev *dev2 = priv->dev2;
	int x, y, z, ges, fin, left, right, middle;
	int back = 0, forward = 0;

	if (model->flags & ALPS_OLDPROTO) {
		left = packet[2] & 0x10;
		right = packet[2] & 0x08;
		middle = 0;
		x = packet[1] | ((packet[0] & 0x07) << 7);
		y = packet[4] | ((packet[3] & 0x07) << 7);
		z = packet[5];
	} else {
		left = packet[3] & 1;
		right = packet[3] & 2;
		middle = packet[3] & 4;
		x = packet[1] | ((packet[2] & 0x78) << (7 - 3));
		y = packet[4] | ((packet[3] & 0x70) << (7 - 4));
		z = packet[5];
	}

	if (model->flags & ALPS_FW_BK_1) {
		back = packet[0] & 0x10;
		forward = packet[2] & 4;
	}

	if (model->flags & ALPS_FW_BK_2) {
		back = packet[3] & 4;
		forward = packet[2] & 4;
		/* both buttons down is reported as a middle click */
		if ((middle = forward && back))
			forward = back = 0;
	}

	ges = packet[2] & 1;	/* gesture (hardware tap) bit */
	fin = packet[2] & 2;	/* finger-on-pad bit */

	/* z == 127 marks a trackstick (relative) packet on DualPoints */
	if ((model->flags & ALPS_DUALPOINT) && z == 127) {
		input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x));
		input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));

		alps_report_buttons(psmouse, dev2, dev, left, right, middle);

		input_sync(dev2);
		return;
	}

	alps_report_buttons(psmouse, dev, dev2, left, right, middle);

	/* Convert hardware tap to a reasonable Z value */
	if (ges && !fin)
		z = 40;

	/*
	 * A "tap and drag" operation is reported by the hardware as a transition
	 * from (!fin && ges) to (fin && ges). This should be translated to the
	 * sequence Z>0, Z==0, Z>0, so the Z==0 event has to be generated manually.
	 */
	if (ges && fin && !priv->prev_fin) {
		input_report_abs(dev, ABS_X, x);
		input_report_abs(dev, ABS_Y, y);
		input_report_abs(dev, ABS_PRESSURE, 0);
		input_report_key(dev, BTN_TOOL_FINGER, 0);
		input_sync(dev);
	}
	priv->prev_fin = fin;

	/* hysteresis between touch (z > 30) and release (z < 25) */
	if (z > 30)
		input_report_key(dev, BTN_TOUCH, 1);
	if (z < 25)
		input_report_key(dev, BTN_TOUCH, 0);

	if (z > 0) {
		input_report_abs(dev, ABS_X, x);
		input_report_abs(dev, ABS_Y, y);
	}

	input_report_abs(dev, ABS_PRESSURE, z);
	input_report_key(dev, BTN_TOOL_FINGER, z > 0);

	if (model->flags & ALPS_WHEEL)
		input_report_rel(dev, REL_WHEEL,
				 ((packet[2] << 1) & 0x08) - ((packet[0] >> 4) & 0x07));

	if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
		input_report_key(dev, BTN_FORWARD, forward);
		input_report_key(dev, BTN_BACK, back);
	}

	if (model->flags & ALPS_FOUR_BUTTONS) {
		input_report_key(dev, BTN_0, packet[2] & 4);
		input_report_key(dev, BTN_1, packet[0] & 0x10);
		input_report_key(dev, BTN_2, packet[3] & 4);
		input_report_key(dev, BTN_3, packet[0] & 0x20);
	}

	input_sync(dev);
}

/*
 * Forward a bare 3-byte PS/2 mouse packet (from the pass-through port /
 * trackstick) as relative motion on the secondary device. Buttons are
 * only reported when @report_buttons is set.
 */
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
					unsigned char packet[],
					bool report_buttons)
{
	struct alps_data *priv = psmouse->private;
	struct input_dev *dev2 = priv->dev2;

	if (report_buttons)
		alps_report_buttons(psmouse, dev2, psmouse->dev,
				packet[0] & 1, packet[0] & 2, packet[0] & 4);

	/* sign-extend 9-bit deltas via the XSGN/YSGN bits in byte 0 */
	input_report_rel(dev2, REL_X,
		packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
	input_report_rel(dev2, REL_Y,
		packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);

	input_sync(dev2);
}

/*
 * Handle byte 7+ of a suspected interleaved stream: decide whether bytes
 * 3..5 were a bare PS/2 packet stuffed inside the 6-byte ALPS packet, or
 * whether a complete ALPS packet ended and a new one began. Returns the
 * usual psmouse protocol-handler verdict.
 */
static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;

	if (psmouse->pktcnt < 6)
		return PSMOUSE_GOOD_DATA;

	if (psmouse->pktcnt == 6) {
		/*
		 * Start a timer to flush the packet if it ends up last
		 * 6-byte packet in the stream. Timer needs to fire
		 * psmouse core times out itself. 20 ms should be enough
		 * to decide if we are getting more data or not.
		 */
		mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20));
		return PSMOUSE_GOOD_DATA;
	}

	del_timer(&priv->timer);

	if (psmouse->packet[6] & 0x80) {

		/*
		 * Highest bit is set - that means we either had
		 * complete ALPS packet and this is start of the
		 * next packet or we got garbage.
		 */

		if (((psmouse->packet[3] |
		      psmouse->packet[4] |
		      psmouse->packet[5]) & 0x80) ||
		    (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
			dbg("refusing packet %x %x %x %x "
			    "(suspected interleaved ps/2)\n",
			    psmouse->packet[3], psmouse->packet[4],
			    psmouse->packet[5], psmouse->packet[6]);
			return PSMOUSE_BAD_DATA;
		}

		alps_process_packet(psmouse);

		/* Continue with the next packet */
		psmouse->packet[0] = psmouse->packet[6];
		psmouse->pktcnt = 1;

	} else {

		/*
		 * High bit is 0 - that means that we indeed got a PS/2
		 * packet in the middle of ALPS packet.
		 *
		 * There is also possibility that we got 6-byte ALPS
		 * packet followed by 3-byte packet from trackpoint. We
		 * can not distinguish between these 2 scenarios but
		 * becase the latter is unlikely to happen in course of
		 * normal operation (user would need to press all
		 * buttons on the pad and start moving trackpoint
		 * without touching the pad surface) we assume former.
		 * Even if we are wrong the wost thing that would happen
		 * the cursor would jump but we should not get protocol
		 * desynchronization.
		 */

		alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
					    false);

		/*
		 * Continue with the standard ALPS protocol handling,
		 * but make sure we won't process it as an interleaved
		 * packet again, which may happen if all buttons are
		 * pressed. To avoid this let's reset the 4th bit which
		 * is normally 1.
		 */
		psmouse->packet[3] = psmouse->packet[6] & 0xf7;
		psmouse->pktcnt = 4;
	}

	return PSMOUSE_GOOD_DATA;
}

/*
 * Timer callback armed by alps_handle_interleaved_ps2(): if no further
 * bytes arrived, validate and process the pending 6-byte packet.
 * Runs with serio RX paused to avoid racing the interrupt path.
 */
static void alps_flush_packet(unsigned long data)
{
	struct psmouse *psmouse = (struct psmouse *)data;

	serio_pause_rx(psmouse->ps2dev.serio);

	if (psmouse->pktcnt == 6) {

		/*
		 * We did not any more data in reasonable amount of time.
		 * Validate the last 3 bytes and process as a standard
		 * ALPS packet.
		 */
		if ((psmouse->packet[3] |
		     psmouse->packet[4] |
		     psmouse->packet[5]) & 0x80) {
			dbg("refusing packet %x %x %x "
			    "(suspected interleaved ps/2)\n",
			    psmouse->packet[3], psmouse->packet[4],
			    psmouse->packet[5]);
		} else {
			alps_process_packet(psmouse);
		}
		psmouse->pktcnt = 0;
	}

	serio_continue_rx(psmouse->ps2dev.serio);
}

/*
 * Main psmouse protocol handler: called for every received byte,
 * validates the accumulating packet and dispatches complete packets.
 */
static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	const struct alps_model_info *model = priv->i;

	if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
		if (psmouse->pktcnt == 3) {
			alps_report_bare_ps2_packet(psmouse, psmouse->packet,
						    true);
			return PSMOUSE_FULL_PACKET;
		}
		return PSMOUSE_GOOD_DATA;
	}

	/* Check for PS/2 packet stuffed in the middle of ALPS packet. */

	if ((model->flags & ALPS_PS2_INTERLEAVED) &&
	    psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
		return alps_handle_interleaved_ps2(psmouse);
	}

	if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
		dbg("refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
		    psmouse->packet[0], model->mask0, model->byte0);
		return PSMOUSE_BAD_DATA;
	}

	/* Bytes 2 - 6 should have 0 in the highest bit */
	if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
	    (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
		dbg("refusing packet[%i] = %x\n",
		    psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]);
		return PSMOUSE_BAD_DATA;
	}

	if (psmouse->pktcnt == 6) {
		alps_process_packet(psmouse);
		return PSMOUSE_FULL_PACKET;
	}

	return PSMOUSE_GOOD_DATA;
}

/*
 * Identify the device: run the "E6" sanity report, then the "E7" report
 * whose 3-byte response is matched against alps_model_data[]. Optionally
 * synthesizes a version number from the E7 response. Returns the matched
 * model entry or NULL if this is not a (known) ALPS device.
 */
static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
	unsigned char param[4];
	int i;

	/*
	 * First try "E6 report".
	 * ALPS should return 0,0,10 or 0,0,100
	 */
	param[0] = 0;
	if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11))
		return NULL;

	param[0] = param[1] = param[2] = 0xff;
	if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
		return NULL;

	dbg("E6 report: %2.2x %2.2x %2.2x", param[0], param[1], param[2]);

	if (param[0] != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100))
		return NULL;

	/*
	 * Now try "E7 report". Allowed responses are in
	 * alps_model_data[].signature
	 */
	param[0] = 0;
	if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21))
		return NULL;

	param[0] = param[1] = param[2] = 0xff;
	if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
		return NULL;

	dbg("E7 report: %2.2x %2.2x %2.2x", param[0], param[1], param[2]);

	if (version) {
		/* map the rate byte to an index into rates[] */
		for (i = 0; i < ARRAY_SIZE(rates) && param[2] != rates[i]; i++)
			/* empty */;
		*version = (param[0] << 8) | (param[1] << 4) | i;
	}

	for (i = 0; i < ARRAY_SIZE(alps_model_data); i++)
		if (!memcmp(param, alps_model_data[i].signature,
			    sizeof(alps_model_data[i].signature)))
			return alps_model_data + i;

	return NULL;
}

/*
 * For DualPoint devices select the device that should respond to
 * subsequent commands. It looks like glidepad is behind stickpointer,
 * I'd thought it would be other way around...
 */
static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	int cmd = enable ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11;

	if (ps2_command(ps2dev, NULL, cmd) ||
	    ps2_command(ps2dev, NULL, cmd) ||
	    ps2_command(ps2dev, NULL, cmd) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE))
		return -1;

	/* we may get 3 more bytes, just ignore them */
	ps2_drain(ps2dev, 3, 100);

	return 0;
}

/*
 * Put the touchpad into absolute reporting mode via the ALPS "magic
 * knock" command sequence, then switch it to poll (remote) mode.
 * Returns 0 on success, non-zero on command failure.
 */
static int alps_absolute_mode(struct psmouse *psmouse)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;

	/* Try ALPS magic knock - 4 disable before enable */
	if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE))
		return -1;

	/*
	 * Switch mouse to poll (remote) mode so motion data will not
	 * get in our way
	 */
	return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL);
}

/*
 * Read the 3-byte device status into @param.
 * Caller must pass a buffer of at least 3 bytes.
 */
static int alps_get_status(struct psmouse *psmouse, char *param)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;

	/* Get status: 0xF5 0xF5 0xF5 0xE9 */
	if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
	    ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
		return -1;

	dbg("Status: %2.2x %2.2x %2.2x", param[0], param[1], param[2]);

	return 0;
}

/*
 * Turn touchpad tapping on or off. The sequences are:
 * 0xE9 0xF5 0xF5 0xF3 0x0A to enable,
 * 0xE9 0xF5 0xF5 0xE8 0x00 to disable.
 * My guess that 0xE9 (GetInfo) is here as a sync point.
 * For models that also have stickpointer (DualPoints) its tapping
 * is controlled separately (0xE6 0xE6 0xE6 0xF3 0x14|0x0A) but
 * we don't fiddle with it.
 */
static int alps_tap_mode(struct psmouse *psmouse, int enable)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	int cmd = enable ? PSMOUSE_CMD_SETRATE : PSMOUSE_CMD_SETRES;
	unsigned char tap_arg = enable ? 0x0A : 0x00;
	unsigned char param[4];

	if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
	    ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
	    ps2_command(ps2dev, &tap_arg, cmd))
		return -1;

	if (alps_get_status(psmouse, param))
		return -1;

	return 0;
}

/*
 * alps_poll() - poll the touchpad for current motion packet.
 * Used in resync.
 */
static int alps_poll(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	unsigned char buf[6];
	bool poll_failed;

	if (priv->i->flags & ALPS_PASS)
		alps_passthrough_mode(psmouse, true);

	poll_failed = ps2_command(&psmouse->ps2dev, buf,
				  PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;

	if (priv->i->flags & ALPS_PASS)
		alps_passthrough_mode(psmouse, false);

	if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
		return -1;

	if ((psmouse->badbyte & 0xc8) == 0x08) {
		/*
		 * Poll the track stick ...
		 */
		if (ps2_command(&psmouse->ps2dev, buf, PSMOUSE_CMD_POLL | (3 << 8)))
			return -1;
	}

	memcpy(psmouse->packet, buf, sizeof(buf));
	return 0;
}

/*
 * Full hardware initialization: enable tapping, switch to absolute
 * mode (wrapped in pass-through mode for DualPoints) and finally put
 * the device back into stream mode. Returns 0 on success.
 */
static int alps_hw_init(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	const struct alps_model_info *model = priv->i;

	if ((model->flags & ALPS_PASS) &&
	    alps_passthrough_mode(psmouse, true)) {
		return -1;
	}

	if (alps_tap_mode(psmouse, true)) {
		printk(KERN_WARNING "alps.c: Failed to enable hardware tapping\n");
		return -1;
	}

	if (alps_absolute_mode(psmouse)) {
		printk(KERN_ERR "alps.c: Failed to enable absolute mode\n");
		return -1;
	}

	if ((model->flags & ALPS_PASS) &&
	    alps_passthrough_mode(psmouse, false)) {
		return -1;
	}

	/* ALPS needs stream mode, otherwise it won't report any data */
	if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSTREAM)) {
		printk(KERN_ERR "alps.c: Failed to enable stream mode\n");
		return -1;
	}

	return 0;
}

/* psmouse reconnect hook: re-detect the model and redo hardware init. */
static int alps_reconnect(struct psmouse *psmouse)
{
	const struct alps_model_info *model;

	psmouse_reset(psmouse);

	model = alps_get_model(psmouse, NULL);
	if (!model)
		return -1;

	return alps_hw_init(psmouse);
}

/* psmouse disconnect hook: tear down timer, second device and state. */
static void alps_disconnect(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;

	psmouse_reset(psmouse);
	del_timer_sync(&priv->timer);
	input_unregister_device(priv->dev2);
	kfree(priv);
}

/*
 * Driver entry point, called by psmouse core after alps_detect():
 * allocates per-device state and the secondary input device, detects
 * the model, initializes the hardware and declares capabilities.
 * Returns 0 on success, -1 on any failure (all resources freed).
 */
int alps_init(struct psmouse *psmouse)
{
	struct alps_data *priv;
	const struct alps_model_info *model;
	struct input_dev *dev1 = psmouse->dev, *dev2;
	int version;

	priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
	dev2 = input_allocate_device();
	if (!priv || !dev2)
		goto init_fail;

	priv->dev2 = dev2;
	setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);

	psmouse->private = priv;

	model = alps_get_model(psmouse, &version);
	if (!model)
		goto init_fail;

	priv->i = model;

	if (alps_hw_init(psmouse))
		goto init_fail;

	/*
	 * Undo part of setup done for us by psmouse core since touchpad
	 * is not a relative device.
	 */
	__clear_bit(EV_REL, dev1->evbit);
	__clear_bit(REL_X, dev1->relbit);
	__clear_bit(REL_Y, dev1->relbit);

	/*
	 * Now set up our capabilities.
	 */
	dev1->evbit[BIT_WORD(EV_KEY)] |= BIT_MASK(EV_KEY);
	dev1->keybit[BIT_WORD(BTN_TOUCH)] |= BIT_MASK(BTN_TOUCH);
	dev1->keybit[BIT_WORD(BTN_TOOL_FINGER)] |= BIT_MASK(BTN_TOOL_FINGER);
	dev1->keybit[BIT_WORD(BTN_LEFT)] |=
		BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);

	dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
	input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
	input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
	input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);

	if (model->flags & ALPS_WHEEL) {
		dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL);
		dev1->relbit[BIT_WORD(REL_WHEEL)] |= BIT_MASK(REL_WHEEL);
	}

	if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
		dev1->keybit[BIT_WORD(BTN_FORWARD)] |= BIT_MASK(BTN_FORWARD);
		dev1->keybit[BIT_WORD(BTN_BACK)] |= BIT_MASK(BTN_BACK);
	}

	if (model->flags & ALPS_FOUR_BUTTONS) {
		dev1->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_0);
		dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1);
		dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2);
		dev1->keybit[BIT_WORD(BTN_3)] |= BIT_MASK(BTN_3);
	} else {
		dev1->keybit[BIT_WORD(BTN_MIDDLE)] |= BIT_MASK(BTN_MIDDLE);
	}

	snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
		 psmouse->ps2dev.serio->phys);
	dev2->phys = priv->phys;
	dev2->name = (model->flags & ALPS_DUALPOINT) ?
		     "DualPoint Stick" : "PS/2 Mouse";
	dev2->id.bustype = BUS_I8042;
	dev2->id.vendor = 0x0002;
	dev2->id.product = PSMOUSE_ALPS;
	dev2->id.version = 0x0000;
	dev2->dev.parent = &psmouse->ps2dev.serio->dev;

	dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
	dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
	dev2->keybit[BIT_WORD(BTN_LEFT)] =
		BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);

	if (input_register_device(priv->dev2))
		goto init_fail;

	psmouse->protocol_handler = alps_process_byte;
	psmouse->poll = alps_poll;
	psmouse->disconnect = alps_disconnect;
	psmouse->reconnect = alps_reconnect;
	psmouse->pktsize = 6;

	/* We are having trouble resyncing ALPS touchpads so disable it for now */
	psmouse->resync_time = 0;

	return 0;

init_fail:
	psmouse_reset(psmouse);
	input_free_device(dev2);
	kfree(priv);
	psmouse->private = NULL;
	return -1;
}

/*
 * Probe hook for psmouse core: detect an ALPS device via the E6/E7
 * reports and, if requested, fill in name/vendor/model properties.
 * Returns 0 when an ALPS device is present, -1 otherwise.
 */
int alps_detect(struct psmouse *psmouse, bool set_properties)
{
	int version;
	const struct alps_model_info *model;

	model = alps_get_model(psmouse, &version);
	if (!model)
		return -1;

	if (set_properties) {
		psmouse->vendor = "ALPS";
		psmouse->name = model->flags & ALPS_DUALPOINT ?
				"DualPoint TouchPad" : "GlidePoint";
		psmouse->model = version;
	}
	return 0;
}
gpl-2.0
LiquidSmooth-Devices/android_kernel_moto_shamu
arch/x86/pci/bus_numa.c
2646
3233
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/range.h>

#include "bus_numa.h"

/* List of hardware-probed PCI root bus descriptors (see alloc_pci_root_info). */
LIST_HEAD(pci_root_infos);

/*
 * Look up the pci_root_info whose bus range starts at @bus.
 * Returns NULL when nothing was probed for that bus number.
 */
static struct pci_root_info *x86_find_pci_root_info(int bus)
{
	struct pci_root_info *info;

	if (list_empty(&pci_root_infos))
		return NULL;

	list_for_each_entry(info, &pci_root_infos, list)
		if (info->busn.start == bus)
			return info;

	return NULL;
}

/*
 * Populate @resources for root bus @bus: use the hardware-probed
 * apertures recorded for this bus if available, otherwise fall back
 * to the historical whole-ioport/whole-iomem defaults.
 */
void x86_pci_root_bus_resources(int bus, struct list_head *resources)
{
	struct pci_root_info *info = x86_find_pci_root_info(bus);
	struct pci_root_res *root_res;
	struct pci_host_bridge_window *window;
	bool found = false;

	if (!info)
		goto default_resources;

	printk(KERN_DEBUG "PCI: root bus %02x: hardware-probed resources\n",
	       bus);

	/* already added by acpi ? */
	list_for_each_entry(window, resources, list)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	if (!found)
		pci_add_resource(resources, &info->busn);

	list_for_each_entry(root_res, &info->resources, list) {
		struct resource *res;
		struct resource *root;

		res = &root_res->res;
		pci_add_resource(resources, res);
		/* also hook the aperture into the global resource tree */
		if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			root = &iomem_resource;
		insert_resource(root, res);
	}
	return;

default_resources:
	/*
	 * We don't have any host bridge aperture information from the
	 * "native host bridge drivers," e.g., amd_bus or broadcom_bus,
	 * so fall back to the defaults historically used by pci_create_bus().
	 */
	printk(KERN_DEBUG "PCI: root bus %02x: using default resources\n", bus);
	pci_add_resource(resources, &ioport_resource);
	pci_add_resource(resources, &iomem_resource);
}

/*
 * Allocate and register a root-bus descriptor covering buses
 * [@bus_min, @bus_max] on NUMA node @node / HT link @link.
 * Returns NULL on allocation failure.
 */
struct pci_root_info __init *alloc_pci_root_info(int bus_min, int bus_max,
						 int node, int link)
{
	struct pci_root_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return info;

	sprintf(info->name, "PCI Bus #%02x", bus_min);
	INIT_LIST_HEAD(&info->resources);
	info->busn.name = info->name;
	info->busn.start = bus_min;
	info->busn.end = bus_max;
	info->busn.flags = IORESOURCE_BUS;
	info->node = node;
	info->link = link;

	list_add_tail(&info->list, &pci_root_infos);

	return info;
}

/*
 * Record the aperture [@start, @end] with @flags on @info. When @merge
 * is set, first try to coalesce it with an existing overlapping or
 * adjacent resource of the same flags; otherwise (or when no merge
 * candidate exists) append it as a new entry. Silently drops empty
 * ranges and allocation failures.
 */
void update_res(struct pci_root_info *info, resource_size_t start,
		resource_size_t end, unsigned long flags, int merge)
{
	struct resource *res;
	struct pci_root_res *root_res;

	if (start > end)
		return;

	if (start == MAX_RESOURCE)
		return;

	if (!merge)
		goto addit;

	/* try to merge it with old one */
	list_for_each_entry(root_res, &info->resources, list) {
		resource_size_t final_start, final_end;
		resource_size_t common_start, common_end;

		res = &root_res->res;
		if (res->flags != flags)
			continue;

		/* "+ 1" also merges ranges that merely touch */
		common_start = max(res->start, start);
		common_end = min(res->end, end);
		if (common_start > common_end + 1)
			continue;

		final_start = min(res->start, start);
		final_end = max(res->end, end);

		res->start = final_start;
		res->end = final_end;
		return;
	}

addit:
	/* need to add that */
	root_res = kzalloc(sizeof(*root_res), GFP_KERNEL);
	if (!root_res)
		return;

	res = &root_res->res;
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;

	list_add_tail(&root_res->list, &info->resources);
}
gpl-2.0
ShogoFujii/PS-MPTCP
drivers/net/wireless/rtlwifi/rtl8192de/fw.c
3414
25612
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../pci.h" #include "../base.h" #include "reg.h" #include "def.h" #include "fw.h" #include "sw.h" static bool _rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv) { return (rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) ? true : false; } static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp; if (enable) { tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04); tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01); tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); } else { tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); /* Reserved for fw extension. 
* 0x81[7] is used for mac0 status , * so don't write this reg here * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);*/ } } static void _rtl92d_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 blocksize = sizeof(u32); u8 *bufferptr = (u8 *) buffer; u32 *pu4BytePtr = (u32 *) buffer; u32 i, offset, blockCount, remainSize; blockCount = size / blocksize; remainSize = size % blocksize; for (i = 0; i < blockCount; i++) { offset = i * blocksize; rtl_write_dword(rtlpriv, (FW_8192D_START_ADDRESS + offset), *(pu4BytePtr + i)); } if (remainSize) { offset = blockCount * blocksize; bufferptr += offset; for (i = 0; i < remainSize; i++) { rtl_write_byte(rtlpriv, (FW_8192D_START_ADDRESS + offset + i), *(bufferptr + i)); } } } static void _rtl92d_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 value8; u8 u8page = (u8) (page & 0x07); value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); _rtl92d_fw_block_write(hw, buffer, size); } static void _rtl92d_fill_dummy(u8 *pfwbuf, u32 *pfwlen) { u32 fwlen = *pfwlen; u8 remain = (u8) (fwlen % 4); remain = (remain == 0) ? 
0 : (4 - remain); while (remain > 0) { pfwbuf[fwlen] = 0; fwlen++; remain--; } *pfwlen = fwlen; } static void _rtl92d_write_fw(struct ieee80211_hw *hw, enum version_8192d version, u8 *buffer, u32 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 *bufferPtr = buffer; u32 pagenums, remainSize; u32 page, offset; RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) _rtl92d_fill_dummy(bufferPtr, &size); pagenums = size / FW_8192D_PAGE_SIZE; remainSize = size % FW_8192D_PAGE_SIZE; if (pagenums > 8) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Page numbers should not greater then 8\n"); } for (page = 0; page < pagenums; page++) { offset = page * FW_8192D_PAGE_SIZE; _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), FW_8192D_PAGE_SIZE); } if (remainSize) { offset = pagenums * FW_8192D_PAGE_SIZE; page = pagenums; _rtl92d_fw_page_write(hw, page, (bufferPtr + offset), remainSize); } } static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 counter = 0; u32 value32; do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) && (!(value32 & FWDL_ChkSum_rpt))); if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "chksum report faill ! REG_MCUFWDL:0x%08x\n", value32); return -EIO; } RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "Checksum report OK ! 
REG_MCUFWDL:0x%08x\n", value32); value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); value32 |= MCUFWDL_RDY; rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); return 0; } void rtl92d_firmware_selfreset(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 u1b_tmp; u8 delay = 100; /* Set (REG_HMETFR + 3) to 0x20 is reset 8051 */ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20); u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); while (u1b_tmp & BIT(2)) { delay--; if (delay == 0) break; udelay(50); u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); } RT_ASSERT((delay > 0), "8051 reset failed!\n"); RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "=====> 8051 reset success (%d)\n", delay); } static int _rtl92d_fw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 counter; RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "FW already have download\n"); /* polling for FW ready */ counter = 0; do { if (rtlhal->interfaceindex == 0) { if (rtl_read_byte(rtlpriv, FW_MAC0_READY) & MAC0_READY) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready success!! REG_MCUFWDL: 0x%x\n", rtl_read_byte(rtlpriv, FW_MAC0_READY)); return 0; } udelay(5); } else { if (rtl_read_byte(rtlpriv, FW_MAC1_READY) & MAC1_READY) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready success!! REG_MCUFWDL: 0x%x\n", rtl_read_byte(rtlpriv, FW_MAC1_READY)); return 0; } udelay(5); } } while (counter++ < POLLING_READY_TIMEOUT_COUNT); if (rtlhal->interfaceindex == 0) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n", rtl_read_byte(rtlpriv, FW_MAC0_READY)); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n", rtl_read_byte(rtlpriv, FW_MAC1_READY)); } RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Polling FW ready fail!! 
REG_MCUFWDL:0x%08ul\n", rtl_read_dword(rtlpriv, REG_MCUFWDL)); return -1; } int rtl92d_download_fw(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 *pfwheader; u8 *pfwdata; u32 fwsize; int err; enum version_8192d version = rtlhal->version; u8 value; u32 count; bool fw_downloaded = false, fwdl_in_process = false; unsigned long flags; if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware) return 1; fwsize = rtlhal->fwsize; pfwheader = rtlhal->pfirmware; pfwdata = rtlhal->pfirmware; rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader); rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n", rtlhal->fw_version, rtlhal->fw_subversion, GET_FIRMWARE_HDR_SIGNATURE(pfwheader)); if (IS_FW_HEADER_EXIST(pfwheader)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Shift 32 bytes for FW header!!\n"); pfwdata = pfwdata + 32; fwsize = fwsize - 32; } spin_lock_irqsave(&globalmutex_for_fwdownload, flags); fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv); if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5)) fwdl_in_process = true; else fwdl_in_process = false; if (fw_downloaded) { spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); goto exit; } else if (fwdl_in_process) { spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); for (count = 0; count < 5000; count++) { udelay(500); spin_lock_irqsave(&globalmutex_for_fwdownload, flags); fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv); if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5)) fwdl_in_process = true; else fwdl_in_process = false; spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags); if (fw_downloaded) goto exit; else if (!fwdl_in_process) break; else RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Wait for another mac download fw\n"); } spin_lock_irqsave(&globalmutex_for_fwdownload, flags); value = rtl_read_byte(rtlpriv, 0x1f); 
		/* NOTE(review): continuation of rtl92d_download_fw() — its opening
		 * lines precede this chunk. Here 0x1f bit5 is set under the
		 * global download mutex before pushing the firmware image. */
		value |= BIT(5);
		rtl_write_byte(rtlpriv, 0x1f, value);
		spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
	} else {
		value = rtl_read_byte(rtlpriv, 0x1f);
		value |= BIT(5);
		rtl_write_byte(rtlpriv, 0x1f, value);
		spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
	}
	/* If 8051 is running in RAM code, driver should
	 * inform Fw to reset by itself, or it will cause
	 * download Fw fail.*/
	/* 8051 RAM code */
	if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
		rtl92d_firmware_selfreset(hw);
		rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
	}
	_rtl92d_enable_fw_download(hw, true);
	_rtl92d_write_fw(hw, version, pfwdata, fwsize);
	_rtl92d_enable_fw_download(hw, false);
	spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
	err = _rtl92d_fw_free_to_go(hw);
	/* download fw over,clear 0x1f[5] */
	value = rtl_read_byte(rtlpriv, 0x1f);
	value &= (~BIT(5));
	rtl_write_byte(rtlpriv, 0x1f, value);
	spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "fw is not ready to run!\n");
		goto exit;
	} else {
		RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
			 "fw is ready to run!\n");
	}
	/* NOTE(review): the goto above lands here anyway, so _rtl92d_fw_init()
	 * runs even when the firmware did not signal ready — presumably
	 * intentional best-effort; confirm against vendor behaviour. */
exit:
	err = _rtl92d_fw_init(hw);
	return err;
}

/*
 * Return true when the firmware has consumed the last H2C message written
 * to mailbox @boxnum, i.e. the corresponding bit in REG_HMETFR is clear.
 */
static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 val_hmetfr;
	bool result = false;

	val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
	if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
		result = true;
	return result;
}

/*
 * Write one host-to-card (H2C) command into the next free HMEBOX mailbox.
 *
 * Serializes against concurrent senders via rtlhal->h2c_setinprogress
 * (guarded by locks.h2c_lock), then busy-waits for the firmware to drain
 * the target mailbox before writing.  Commands of 1-3 bytes go into the
 * 4-byte box register; 4-5 byte commands spill the first two bytes into
 * the extension register and set BIT(7) of the element id to say so.
 * Mailboxes are used round-robin (last_hmeboxnum cycles 0..3).
 */
static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
		u8 element_id, u32 cmd_len, u8 *cmdbuffer)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	u8 boxnum;
	u16 box_reg = 0, box_extreg = 0;
	u8 u1b_tmp;
	bool isfw_read = false;
	u8 buf_index = 0;
	bool bwrite_success = false;
	u8 wait_h2c_limmit = 100;
	u8 wait_writeh2c_limmit = 100;
	u8 boxcontent[4], boxextcontent[2];
	u32 h2c_waitcounter = 0;
	unsigned long flag;
	u8 idx;

	/* H2C traffic is pointless while the RF is powered down. */
	if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) {
		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
			 "Return as RF is off!!!\n");
		return;
	}
	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
	/* Acquire exclusive H2C access: spin (with 100us sleeps, up to
	 * ~100ms total) while another sender is in progress. */
	while (true) {
		spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
		if (rtlhal->h2c_setinprogress) {
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "H2C set in progress! Wait to set..element_id(%d)\n",
				 element_id);
			while (rtlhal->h2c_setinprogress) {
				spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
						       flag);
				h2c_waitcounter++;
				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
					 "Wait 100 us (%d times)...\n",
					 h2c_waitcounter);
				udelay(100);
				/* Give up (silently) after ~100ms. */
				if (h2c_waitcounter > 1000)
					return;
				spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
						  flag);
			}
			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
		} else {
			rtlhal->h2c_setinprogress = true;
			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
			break;
		}
	}
	while (!bwrite_success) {
		/* Bound the number of mailbox attempts. */
		wait_writeh2c_limmit--;
		if (wait_writeh2c_limmit == 0) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Write H2C fail because no trigger for FW INT!\n");
			break;
		}
		boxnum = rtlhal->last_hmeboxnum;
		/* Map the round-robin box number to its register pair. */
		switch (boxnum) {
		case 0:
			box_reg = REG_HMEBOX_0;
			box_extreg = REG_HMEBOX_EXT_0;
			break;
		case 1:
			box_reg = REG_HMEBOX_1;
			box_extreg = REG_HMEBOX_EXT_1;
			break;
		case 2:
			box_reg = REG_HMEBOX_2;
			box_extreg = REG_HMEBOX_EXT_2;
			break;
		case 3:
			box_reg = REG_HMEBOX_3;
			box_extreg = REG_HMEBOX_EXT_3;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n");
			break;
		}
		/* Poll (10us steps, up to ~1ms) for the firmware to drain
		 * the previous message from this mailbox. */
		isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
		while (!isfw_read) {
			wait_h2c_limmit--;
			if (wait_h2c_limmit == 0) {
				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
					 "Waiting too long for FW read clear HMEBox(%d)!\n",
					 boxnum);
				break;
			}
			udelay(10);
			isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
			/* 0x1BF is read for diagnostics only — presumably a
			 * firmware status byte; TODO confirm its meaning. */
			u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
				 boxnum, u1b_tmp);
		}
		if (!isfw_read) {
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
				 boxnum);
			break;
		}
		memset(boxcontent, 0, sizeof(boxcontent));
		memset(boxextcontent, 0, sizeof(boxextcontent));
		boxcontent[0] = element_id;
		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
			 "Write element_id box_reg(%4x) = %2x\n",
			 box_reg, element_id);
		/* BIT(7) of the element id flags use of the extension
		 * register for payloads larger than 3 bytes. */
		switch (cmd_len) {
		case 1:
			boxcontent[0] &= ~(BIT(7));
			memcpy(boxcontent + 1, cmdbuffer + buf_index, 1);
			for (idx = 0; idx < 4; idx++)
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			break;
		case 2:
			boxcontent[0] &= ~(BIT(7));
			memcpy(boxcontent + 1, cmdbuffer + buf_index, 2);
			for (idx = 0; idx < 4; idx++)
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			break;
		case 3:
			boxcontent[0] &= ~(BIT(7));
			memcpy(boxcontent + 1, cmdbuffer + buf_index, 3);
			for (idx = 0; idx < 4; idx++)
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			break;
		case 4:
			boxcontent[0] |= (BIT(7));
			memcpy(boxextcontent, cmdbuffer + buf_index, 2);
			memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 2);
			for (idx = 0; idx < 2; idx++)
				rtl_write_byte(rtlpriv, box_extreg + idx,
					       boxextcontent[idx]);
			for (idx = 0; idx < 4; idx++)
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			break;
		case 5:
			boxcontent[0] |= (BIT(7));
			memcpy(boxextcontent, cmdbuffer + buf_index, 2);
			memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 3);
			for (idx = 0; idx < 2; idx++)
				rtl_write_byte(rtlpriv, box_extreg + idx,
					       boxextcontent[idx]);
			for (idx = 0; idx < 4; idx++)
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n");
			break;
		}
		bwrite_success = true;
		/* Advance the round-robin mailbox index (wraps at 4). */
		rtlhal->last_hmeboxnum = boxnum + 1;
		if (rtlhal->last_hmeboxnum == 4)
			rtlhal->last_hmeboxnum = 0;
		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
			 "pHalData->last_hmeboxnum  = %d\n",
			 rtlhal->last_hmeboxnum);
	}
	/* Release exclusive H2C access. */
	spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
	rtlhal->h2c_setinprogress = false;
	spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}

void	/* return type of rtl92d_fill_h2c_cmd(), continued on the next source line */
rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *cmdbuffer) { u32 tmp_cmdbuf[2]; memset(tmp_cmdbuf, 0, 8); memcpy(tmp_cmdbuf, cmdbuffer, cmd_len); _rtl92d_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf); return; } void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 u1_h2c_set_pwrmode[3] = { 0 }; struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1); SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, ppsc->reg_max_lps_awakeintvl); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl92d_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode", u1_h2c_set_pwrmode, 3); rtl92d_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); } static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring; struct rtl_tx_desc *pdesc; u8 idx = 0; unsigned long flags; struct sk_buff *pskb; ring = &rtlpci->tx_ring[BEACON_QUEUE]; pskb = __skb_dequeue(&ring->queue); kfree_skb(pskb); spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); pdesc = &ring->desc[idx]; /* discard output from call below */ rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN); rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); __skb_queue_tail(&ring->queue, skb); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); return true; } #define BEACON_PG 0 /*->1 */ #define PSPOLL_PG 2 #define NULL_PG 3 #define PROBERSP_PG 4 /*->5 */ #define TOTAL_RESERVED_PKT_LEN 768 static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { /* page 0 beacon */ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 
0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69, 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C, 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96, 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02, 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 1 beacon */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 2 ps-poll */ 0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 3 null */ 0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 4 probe_resp */ 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00, 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00, 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69, 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C, 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96, 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02, 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 5 probe_resp */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct sk_buff *skb = NULL; u32 totalpacketlen; bool rtstatus; u8 u1RsvdPageLoc[3] = { 0 }; bool dlok = false; u8 *beacon; u8 *p_pspoll; u8 *nullfunc; u8 *p_probersp; /*--------------------------------------------------------- (1) beacon ---------------------------------------------------------*/ beacon = &reserved_page_packet[BEACON_PG * 128]; SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); SET_80211_HDR_ADDRESS3(beacon, mac->bssid); /*------------------------------------------------------- (2) ps-poll --------------------------------------------------------*/ p_pspoll = &reserved_page_packet[PSPOLL_PG * 128]; SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); /*-------------------------------------------------------- (3) null data ---------------------------------------------------------*/ nullfunc = &reserved_page_packet[NULL_PG * 128]; SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); 
/*--------------------------------------------------------- (4) probe response ----------------------------------------------------------*/ p_probersp = &reserved_page_packet[PROBERSP_PG * 128]; SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL", &reserved_page_packet[0], totalpacketlen); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL", u1RsvdPageLoc, 3); skb = dev_alloc_skb(totalpacketlen); if (!skb) { dlok = false; } else { memcpy((u8 *) skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); rtstatus = _rtl92d_cmd_send_packet(hw, skb); if (rtstatus) dlok = true; } if (dlok) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Set RSVD page location to Fw\n"); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE", u1RsvdPageLoc, 3); rtl92d_fill_h2c_cmd(hw, H2C_RSVDPAGE, sizeof(u1RsvdPageLoc), u1RsvdPageLoc); } else RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Set RSVD page location to Fw FAIL!!!!!!\n"); } void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) { u8 u1_joinbssrpt_parm[1] = {0}; SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus); rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); }
gpl-2.0
keiranFTW/android_kernel_sony_u8500
drivers/usb/serial/ipw.c
3926
9587
/* * IPWireless 3G UMTS TDD Modem driver (USB connected) * * Copyright (C) 2004 Roelf Diedericks <roelfd@inet.co.za> * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * All information about the device was acquired using SnoopyPro * on MSFT's O/S, and examing the MSFT drivers' debug output * (insanely left _on_ in the enduser version) * * It was written out of frustration with the IPWireless USB modem * supplied by Axity3G/Sentech South Africa not supporting * Linux whatsoever. * * Nobody provided any proprietary information that was not already * available for this device. * * The modem adheres to the "3GPP TS 27.007 AT command set for 3G * User Equipment (UE)" standard, available from * http://www.3gpp.org/ftp/Specs/html-info/27007.htm * * The code was only tested the IPWireless handheld modem distributed * in South Africa by Sentech. * * It may work for Woosh Inc in .nz too, as it appears they use the * same kit. * * There is still some work to be done in terms of handling * DCD, DTR, RTS, CTS which are currently faked. * It's good enough for PPP at this point. 
It's based off all kinds of * code found in usb/serial and usb/class */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/uaccess.h> /* * Version Information */ #define DRIVER_VERSION "v0.4" #define DRIVER_AUTHOR "Roelf Diedericks" #define DRIVER_DESC "IPWireless tty driver" #define IPW_TTY_MAJOR 240 /* real device node major id, experimental range */ #define IPW_TTY_MINORS 256 /* we support 256 devices, dunno why, it'd be insane :) */ #define USB_IPW_MAGIC 0x6d02 /* magic number for ipw struct */ /* Message sizes */ #define EVENT_BUFFER_SIZE 0xFF #define CHAR2INT16(c1, c0) (((u32)((c1) & 0xff) << 8) + (u32)((c0) & 0xff)) /* vendor/product pairs that are known work with this driver*/ #define IPW_VID 0x0bc3 #define IPW_PID 0x0001 /* Vendor commands: */ /* baud rates */ enum { ipw_sio_b256000 = 0x000e, ipw_sio_b128000 = 0x001d, ipw_sio_b115200 = 0x0020, ipw_sio_b57600 = 0x0040, ipw_sio_b56000 = 0x0042, ipw_sio_b38400 = 0x0060, ipw_sio_b19200 = 0x00c0, ipw_sio_b14400 = 0x0100, ipw_sio_b9600 = 0x0180, ipw_sio_b4800 = 0x0300, ipw_sio_b2400 = 0x0600, ipw_sio_b1200 = 0x0c00, ipw_sio_b600 = 0x1800 }; /* data bits */ #define ipw_dtb_7 0x700 #define ipw_dtb_8 0x810 /* ok so the define is misleading, I know, but forces 8,n,1 */ /* I mean, is there a point to any other setting these days? :) */ /* usb control request types : */ #define IPW_SIO_RXCTL 0x00 /* control bulk rx channel transmissions, value=1/0 (on/off) */ #define IPW_SIO_SET_BAUD 0x01 /* set baud, value=requested ipw_sio_bxxxx */ #define IPW_SIO_SET_LINE 0x03 /* set databits, parity. value=ipw_dtb_x */ #define IPW_SIO_SET_PIN 0x03 /* set/clear dtr/rts value=ipw_pin_xxx */ #define IPW_SIO_POLL 0x08 /* get serial port status byte, call with value=0 */ #define IPW_SIO_INIT 0x11 /* initializes ? 
value=0 (appears as first thing todo on open) */ #define IPW_SIO_PURGE 0x12 /* purge all transmissions?, call with value=numchar_to_purge */ #define IPW_SIO_HANDFLOW 0x13 /* set xon/xoff limits value=0, and a buffer of 0x10 bytes */ #define IPW_SIO_SETCHARS 0x13 /* set the flowcontrol special chars, value=0, buf=6 bytes, */ /* last 2 bytes contain flowcontrol chars e.g. 00 00 00 00 11 13 */ /* values used for request IPW_SIO_SET_PIN */ #define IPW_PIN_SETDTR 0x101 #define IPW_PIN_SETRTS 0x202 #define IPW_PIN_CLRDTR 0x100 #define IPW_PIN_CLRRTS 0x200 /* unconfirmed */ /* values used for request IPW_SIO_RXCTL */ #define IPW_RXBULK_ON 1 #define IPW_RXBULK_OFF 0 /* various 16 byte hardcoded transferbuffers used by flow control */ #define IPW_BYTES_FLOWINIT { 0x01, 0, 0, 0, 0x40, 0, 0, 0, \ 0, 0, 0, 0, 0, 0, 0, 0 } /* Interpretation of modem status lines */ /* These need sorting out by individually connecting pins and checking * results. FIXME! * When data is being sent we see 0x30 in the lower byte; this must * contain DSR and CTS ... 
*/ #define IPW_DSR ((1<<4) | (1<<5)) #define IPW_CTS ((1<<5) | (1<<4)) #define IPW_WANTS_TO_SEND 0x30 static const struct usb_device_id usb_ipw_ids[] = { { USB_DEVICE(IPW_VID, IPW_PID) }, { }, }; MODULE_DEVICE_TABLE(usb, usb_ipw_ids); static struct usb_driver usb_ipw_driver = { .name = "ipwtty", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = usb_ipw_ids, .no_dynamic_id = 1, }; static int debug; static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_device *dev = port->serial->dev; u8 buf_flow_static[16] = IPW_BYTES_FLOWINIT; u8 *buf_flow_init; int result; dbg("%s", __func__); buf_flow_init = kmemdup(buf_flow_static, 16, GFP_KERNEL); if (!buf_flow_init) return -ENOMEM; /* --1: Tell the modem to initialize (we think) From sniffs this is * always the first thing that gets sent to the modem during * opening of the device */ dbg("%s: Sending SIO_INIT (we guess)", __func__); result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), IPW_SIO_INIT, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, 0, 0, /* index */ NULL, 0, 100000); if (result < 0) dev_err(&port->dev, "Init of modem failed (error = %d)\n", result); /* reset the bulk pipes */ usb_clear_halt(dev, usb_rcvbulkpipe(dev, port->bulk_in_endpointAddress)); usb_clear_halt(dev, usb_sndbulkpipe(dev, port->bulk_out_endpointAddress)); /*--2: Start reading from the device */ dbg("%s: setting up bulk read callback", __func__); usb_serial_generic_open(tty, port); /*--3: Tell the modem to open the floodgates on the rx bulk channel */ dbg("%s:asking modem for RxRead (RXBULK_ON)", __func__); result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), IPW_SIO_RXCTL, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, IPW_RXBULK_ON, 0, /* index */ NULL, 0, 100000); if (result < 0) dev_err(&port->dev, "Enabling bulk RxRead failed (error = %d)\n", result); /*--4: setup the initial flowcontrol */ dbg("%s:setting init flowcontrol (%s)", __func__, buf_flow_init); result = 
usb_control_msg(dev, usb_sndctrlpipe(dev, 0), IPW_SIO_HANDFLOW, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, 0, 0, buf_flow_init, 0x10, 200000); if (result < 0) dev_err(&port->dev, "initial flowcontrol failed (error = %d)\n", result); kfree(buf_flow_init); return 0; } static void ipw_dtr_rts(struct usb_serial_port *port, int on) { struct usb_device *dev = port->serial->dev; int result; dbg("%s: on = %d", __func__, on); result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), IPW_SIO_SET_PIN, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, on ? IPW_PIN_SETDTR : IPW_PIN_CLRDTR, 0, NULL, 0, 200000); if (result < 0) dev_err(&port->dev, "setting dtr failed (error = %d)\n", result); result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), IPW_SIO_SET_PIN, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, on ? IPW_PIN_SETRTS : IPW_PIN_CLRRTS, 0, NULL, 0, 200000); if (result < 0) dev_err(&port->dev, "setting rts failed (error = %d)\n", result); } static void ipw_close(struct usb_serial_port *port) { struct usb_device *dev = port->serial->dev; int result; /*--3: purge */ dbg("%s:sending purge", __func__); result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), IPW_SIO_PURGE, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, 0x03, 0, NULL, 0, 200000); if (result < 0) dev_err(&port->dev, "purge failed (error = %d)\n", result); /* send RXBULK_off (tell modem to stop transmitting bulk data on rx chan) */ result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), IPW_SIO_RXCTL, USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT, IPW_RXBULK_OFF, 0, /* index */ NULL, 0, 100000); if (result < 0) dev_err(&port->dev, "Disabling bulk RxRead failed (error = %d)\n", result); usb_serial_generic_close(port); } static struct usb_serial_driver ipw_device = { .driver = { .owner = THIS_MODULE, .name = "ipw", }, .description = "IPWireless converter", .usb_driver = &usb_ipw_driver, .id_table = usb_ipw_ids, .num_ports = 1, .open = ipw_open, .close = ipw_close, .dtr_rts = ipw_dtr_rts, 
}; static int __init usb_ipw_init(void) { int retval; retval = usb_serial_register(&ipw_device); if (retval) return retval; retval = usb_register(&usb_ipw_driver); if (retval) { usb_serial_deregister(&ipw_device); return retval; } printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return 0; } static void __exit usb_ipw_exit(void) { usb_deregister(&usb_ipw_driver); usb_serial_deregister(&ipw_device); } module_init(usb_ipw_init); module_exit(usb_ipw_exit); /* Module information */ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not");
gpl-2.0
usmcamgrimm/kernel_lge_g3
arch/arm/mach-pxa/pxa25x.c
4950
9905
/* * linux/arch/arm/mach-pxa/pxa25x.c * * Author: Nicolas Pitre * Created: Jun 15, 2001 * Copyright: MontaVista Software Inc. * * Code specific to PXA21x/25x/26x variants. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Since this file should be linked before any other machine specific file, * the __initcall() here will be executed first. This serves as default * initialization stuff for PXA machines which can be overridden later if * need be. */ #include <linux/gpio.h> #include <linux/gpio-pxa.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/irq.h> #include <asm/mach/map.h> #include <asm/suspend.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <mach/pxa25x.h> #include <mach/reset.h> #include <mach/pm.h> #include <mach/dma.h> #include <mach/smemc.h> #include "generic.h" #include "devices.h" #include "clock.h" /* * Various clock factors driven by the CCCR register. */ /* Crystal Frequency to Memory Frequency Multiplier (L) */ static unsigned char L_clk_mult[32] = { 0, 27, 32, 36, 40, 45, 0, }; /* Memory Frequency to Run Mode Frequency Multiplier (M) */ static unsigned char M_clk_mult[4] = { 0, 1, 2, 4 }; /* Run Mode Frequency to Turbo Mode Frequency Multiplier (N) */ /* Note: we store the value N * 2 here. */ static unsigned char N2_clk_mult[8] = { 0, 0, 2, 3, 4, 0, 6, 0 }; /* Crystal clock */ #define BASE_CLK 3686400 /* * Get the clock frequency as reflected by CCCR and the turbo flag. * We assume these values have been applied via a fcs. * If info is not 0 we also display the current settings. 
*/ unsigned int pxa25x_get_clk_frequency_khz(int info) { unsigned long cccr, turbo; unsigned int l, L, m, M, n2, N; cccr = CCCR; asm( "mrc\tp14, 0, %0, c6, c0, 0" : "=r" (turbo) ); l = L_clk_mult[(cccr >> 0) & 0x1f]; m = M_clk_mult[(cccr >> 5) & 0x03]; n2 = N2_clk_mult[(cccr >> 7) & 0x07]; L = l * BASE_CLK; M = m * L; N = n2 * M / 2; if(info) { L += 5000; printk( KERN_INFO "Memory clock: %d.%02dMHz (*%d)\n", L / 1000000, (L % 1000000) / 10000, l ); M += 5000; printk( KERN_INFO "Run Mode clock: %d.%02dMHz (*%d)\n", M / 1000000, (M % 1000000) / 10000, m ); N += 5000; printk( KERN_INFO "Turbo Mode clock: %d.%02dMHz (*%d.%d, %sactive)\n", N / 1000000, (N % 1000000) / 10000, n2 / 2, (n2 % 2) * 5, (turbo & 1) ? "" : "in" ); } return (turbo & 1) ? (N/1000) : (M/1000); } static unsigned long clk_pxa25x_mem_getrate(struct clk *clk) { return L_clk_mult[(CCCR >> 0) & 0x1f] * BASE_CLK; } static const struct clkops clk_pxa25x_mem_ops = { .enable = clk_dummy_enable, .disable = clk_dummy_disable, .getrate = clk_pxa25x_mem_getrate, }; static const struct clkops clk_pxa25x_lcd_ops = { .enable = clk_pxa2xx_cken_enable, .disable = clk_pxa2xx_cken_disable, .getrate = clk_pxa25x_mem_getrate, }; static unsigned long gpio12_config_32k[] = { GPIO12_32KHz, }; static unsigned long gpio12_config_gpio[] = { GPIO12_GPIO, }; static void clk_gpio12_enable(struct clk *clk) { pxa2xx_mfp_config(gpio12_config_32k, 1); } static void clk_gpio12_disable(struct clk *clk) { pxa2xx_mfp_config(gpio12_config_gpio, 1); } static const struct clkops clk_pxa25x_gpio12_ops = { .enable = clk_gpio12_enable, .disable = clk_gpio12_disable, }; static unsigned long gpio11_config_3m6[] = { GPIO11_3_6MHz, }; static unsigned long gpio11_config_gpio[] = { GPIO11_GPIO, }; static void clk_gpio11_enable(struct clk *clk) { pxa2xx_mfp_config(gpio11_config_3m6, 1); } static void clk_gpio11_disable(struct clk *clk) { pxa2xx_mfp_config(gpio11_config_gpio, 1); } static const struct clkops clk_pxa25x_gpio11_ops = { .enable = 
clk_gpio11_enable, .disable = clk_gpio11_disable, }; /* * 3.6864MHz -> OST, GPIO, SSP, PWM, PLLs (95.842MHz, 147.456MHz) * 95.842MHz -> MMC 19.169MHz, I2C 31.949MHz, FICP 47.923MHz, USB 47.923MHz * 147.456MHz -> UART 14.7456MHz, AC97 12.288MHz, I2S 5.672MHz (allegedly) */ /* * PXA 2xx clock declarations. */ static DEFINE_PXA2_CKEN(pxa25x_hwuart, HWUART, 14745600, 1); static DEFINE_PXA2_CKEN(pxa25x_ffuart, FFUART, 14745600, 1); static DEFINE_PXA2_CKEN(pxa25x_btuart, BTUART, 14745600, 1); static DEFINE_PXA2_CKEN(pxa25x_stuart, STUART, 14745600, 1); static DEFINE_PXA2_CKEN(pxa25x_usb, USB, 47923000, 5); static DEFINE_PXA2_CKEN(pxa25x_mmc, MMC, 19169000, 0); static DEFINE_PXA2_CKEN(pxa25x_i2c, I2C, 31949000, 0); static DEFINE_PXA2_CKEN(pxa25x_ssp, SSP, 3686400, 0); static DEFINE_PXA2_CKEN(pxa25x_nssp, NSSP, 3686400, 0); static DEFINE_PXA2_CKEN(pxa25x_assp, ASSP, 3686400, 0); static DEFINE_PXA2_CKEN(pxa25x_pwm0, PWM0, 3686400, 0); static DEFINE_PXA2_CKEN(pxa25x_pwm1, PWM1, 3686400, 0); static DEFINE_PXA2_CKEN(pxa25x_ac97, AC97, 24576000, 0); static DEFINE_PXA2_CKEN(pxa25x_i2s, I2S, 14745600, 0); static DEFINE_PXA2_CKEN(pxa25x_ficp, FICP, 47923000, 0); static DEFINE_CK(pxa25x_lcd, LCD, &clk_pxa25x_lcd_ops); static DEFINE_CLK(pxa25x_gpio11, &clk_pxa25x_gpio11_ops, 3686400, 0); static DEFINE_CLK(pxa25x_gpio12, &clk_pxa25x_gpio12_ops, 32768, 0); static DEFINE_CLK(pxa25x_mem, &clk_pxa25x_mem_ops, 0, 0); static struct clk_lookup pxa25x_clkregs[] = { INIT_CLKREG(&clk_pxa25x_lcd, "pxa2xx-fb", NULL), INIT_CLKREG(&clk_pxa25x_ffuart, "pxa2xx-uart.0", NULL), INIT_CLKREG(&clk_pxa25x_btuart, "pxa2xx-uart.1", NULL), INIT_CLKREG(&clk_pxa25x_stuart, "pxa2xx-uart.2", NULL), INIT_CLKREG(&clk_pxa25x_usb, "pxa25x-udc", NULL), INIT_CLKREG(&clk_pxa25x_mmc, "pxa2xx-mci.0", NULL), INIT_CLKREG(&clk_pxa25x_i2c, "pxa2xx-i2c.0", NULL), INIT_CLKREG(&clk_pxa25x_ssp, "pxa25x-ssp.0", NULL), INIT_CLKREG(&clk_pxa25x_nssp, "pxa25x-nssp.1", NULL), INIT_CLKREG(&clk_pxa25x_assp, "pxa25x-nssp.2", NULL), 
INIT_CLKREG(&clk_pxa25x_pwm0, "pxa25x-pwm.0", NULL), INIT_CLKREG(&clk_pxa25x_pwm1, "pxa25x-pwm.1", NULL), INIT_CLKREG(&clk_pxa25x_i2s, "pxa2xx-i2s", NULL), INIT_CLKREG(&clk_pxa25x_stuart, "pxa2xx-ir", "UARTCLK"), INIT_CLKREG(&clk_pxa25x_ficp, "pxa2xx-ir", "FICPCLK"), INIT_CLKREG(&clk_pxa25x_ac97, NULL, "AC97CLK"), INIT_CLKREG(&clk_pxa25x_gpio11, NULL, "GPIO11_CLK"), INIT_CLKREG(&clk_pxa25x_gpio12, NULL, "GPIO12_CLK"), INIT_CLKREG(&clk_pxa25x_mem, "pxa2xx-pcmcia", NULL), INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL), }; static struct clk_lookup pxa25x_hwuart_clkreg = INIT_CLKREG(&clk_pxa25x_hwuart, "pxa2xx-uart.3", NULL); #ifdef CONFIG_PM #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] /* * List of global PXA peripheral registers to preserve. * More ones like CP and general purpose register values are preserved * with the stack pointer in sleep.S. */ enum { SLEEP_SAVE_PSTR, SLEEP_SAVE_COUNT }; static void pxa25x_cpu_pm_save(unsigned long *sleep_save) { SAVE(PSTR); } static void pxa25x_cpu_pm_restore(unsigned long *sleep_save) { RESTORE(PSTR); } static void pxa25x_cpu_pm_enter(suspend_state_t state) { /* Clear reset status */ RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR; switch (state) { case PM_SUSPEND_MEM: cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend); break; } } static int pxa25x_cpu_pm_prepare(void) { /* set resume return address */ PSPR = virt_to_phys(cpu_resume); return 0; } static void pxa25x_cpu_pm_finish(void) { /* ensure not to come back here if it wasn't intended */ PSPR = 0; } static struct pxa_cpu_pm_fns pxa25x_cpu_pm_fns = { .save_count = SLEEP_SAVE_COUNT, .valid = suspend_valid_only_mem, .save = pxa25x_cpu_pm_save, .restore = pxa25x_cpu_pm_restore, .enter = pxa25x_cpu_pm_enter, .prepare = pxa25x_cpu_pm_prepare, .finish = pxa25x_cpu_pm_finish, }; static void __init pxa25x_init_pm(void) { pxa_cpu_pm_fns = &pxa25x_cpu_pm_fns; } #else static inline void pxa25x_init_pm(void) {} #endif /* PXA25x: supports 
wakeup from GPIO0..GPIO15 and RTC alarm */ static int pxa25x_set_wake(struct irq_data *d, unsigned int on) { int gpio = pxa_irq_to_gpio(d->irq); uint32_t mask = 0; if (gpio >= 0 && gpio < 85) return gpio_set_wake(gpio, on); if (d->irq == IRQ_RTCAlrm) { mask = PWER_RTC; goto set_pwer; } return -EINVAL; set_pwer: if (on) PWER |= mask; else PWER &=~mask; return 0; } void __init pxa25x_init_irq(void) { pxa_init_irq(32, pxa25x_set_wake); } #ifdef CONFIG_CPU_PXA26x void __init pxa26x_init_irq(void) { pxa_init_irq(32, pxa25x_set_wake); } #endif static struct map_desc pxa25x_io_desc[] __initdata = { { /* Mem Ctl */ .virtual = (unsigned long)SMEMC_VIRT, .pfn = __phys_to_pfn(PXA2XX_SMEMC_BASE), .length = 0x00200000, .type = MT_DEVICE }, }; void __init pxa25x_map_io(void) { pxa_map_io(); iotable_init(ARRAY_AND_SIZE(pxa25x_io_desc)); pxa25x_get_clk_frequency_khz(1); } static struct platform_device *pxa25x_devices[] __initdata = { &pxa25x_device_udc, &pxa_device_pmu, &pxa_device_i2s, &sa1100_device_rtc, &pxa25x_device_ssp, &pxa25x_device_nssp, &pxa25x_device_assp, &pxa25x_device_pwm0, &pxa25x_device_pwm1, &pxa_device_asoc_platform, }; static int __init pxa25x_init(void) { int ret = 0; if (cpu_is_pxa25x()) { reset_status = RCSR; clkdev_add_table(pxa25x_clkregs, ARRAY_SIZE(pxa25x_clkregs)); if ((ret = pxa_init_dma(IRQ_DMA, 16))) return ret; pxa25x_init_pm(); register_syscore_ops(&pxa_irq_syscore_ops); register_syscore_ops(&pxa2xx_mfp_syscore_ops); register_syscore_ops(&pxa2xx_clock_syscore_ops); ret = platform_add_devices(pxa25x_devices, ARRAY_SIZE(pxa25x_devices)); if (ret) return ret; } /* Only add HWUART for PXA255/26x; PXA210/250 do not have it. */ if (cpu_is_pxa255()) clkdev_add(&pxa25x_hwuart_clkreg); return ret; } postcore_initcall(pxa25x_init);
gpl-2.0
SlimRoms/kernel_sony_msm8974pro
net/rxrpc/ar-output.c
4950
18399
/* RxRPC packet transmission * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/net.h> #include <linux/gfp.h> #include <linux/skbuff.h> #include <linux/circ_buf.h> #include <linux/export.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include "ar-internal.h" int rxrpc_resend_timeout = 4; static int rxrpc_send_data(struct kiocb *iocb, struct rxrpc_sock *rx, struct rxrpc_call *call, struct msghdr *msg, size_t len); /* * extract control messages from the sendmsg() control buffer */ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg, unsigned long *user_call_ID, enum rxrpc_command *command, u32 *abort_code, bool server) { struct cmsghdr *cmsg; int len; *command = RXRPC_CMD_SEND_DATA; if (msg->msg_controllen == 0) return -EINVAL; for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); _debug("CMSG %d, %d, %d", cmsg->cmsg_level, cmsg->cmsg_type, len); if (cmsg->cmsg_level != SOL_RXRPC) continue; switch (cmsg->cmsg_type) { case RXRPC_USER_CALL_ID: if (msg->msg_flags & MSG_CMSG_COMPAT) { if (len != sizeof(u32)) return -EINVAL; *user_call_ID = *(u32 *) CMSG_DATA(cmsg); } else { if (len != sizeof(unsigned long)) return -EINVAL; *user_call_ID = *(unsigned long *) CMSG_DATA(cmsg); } _debug("User Call ID %lx", *user_call_ID); break; case RXRPC_ABORT: if (*command != RXRPC_CMD_SEND_DATA) return -EINVAL; *command = RXRPC_CMD_SEND_ABORT; if (len != sizeof(*abort_code)) return -EINVAL; *abort_code = *(unsigned int *) CMSG_DATA(cmsg); _debug("Abort %x", *abort_code); if (*abort_code == 0) return -EINVAL; break; case 
RXRPC_ACCEPT: if (*command != RXRPC_CMD_SEND_DATA) return -EINVAL; *command = RXRPC_CMD_ACCEPT; if (len != 0) return -EINVAL; if (!server) return -EISCONN; break; default: return -EINVAL; } } _leave(" = 0"); return 0; } /* * abort a call, sending an ABORT packet to the peer */ static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code) { write_lock_bh(&call->state_lock); if (call->state <= RXRPC_CALL_COMPLETE) { call->state = RXRPC_CALL_LOCALLY_ABORTED; call->abort_code = abort_code; set_bit(RXRPC_CALL_ABORT, &call->events); del_timer_sync(&call->resend_timer); del_timer_sync(&call->ack_timer); clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); clear_bit(RXRPC_CALL_ACK, &call->events); clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); rxrpc_queue_call(call); } write_unlock_bh(&call->state_lock); } /* * send a message forming part of a client call through an RxRPC socket * - caller holds the socket locked * - the socket may be either a client socket or a server socket */ int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, struct rxrpc_transport *trans, struct msghdr *msg, size_t len) { struct rxrpc_conn_bundle *bundle; enum rxrpc_command cmd; struct rxrpc_call *call; unsigned long user_call_ID = 0; struct key *key; __be16 service_id; u32 abort_code = 0; int ret; _enter(""); ASSERT(trans != NULL); ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code, false); if (ret < 0) return ret; bundle = NULL; if (trans) { service_id = rx->service_id; if (msg->msg_name) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) msg->msg_name; service_id = htons(srx->srx_service); } key = rx->key; if (key && !rx->key->payload.data) key = NULL; bundle = rxrpc_get_bundle(rx, trans, key, service_id, GFP_KERNEL); if (IS_ERR(bundle)) return PTR_ERR(bundle); } call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, abort_code == 0, GFP_KERNEL); if (trans) rxrpc_put_bundle(trans, bundle); if (IS_ERR(call)) { _leave(" = %ld", PTR_ERR(call)); 
return PTR_ERR(call); } _debug("CALL %d USR %lx ST %d on CONN %p", call->debug_id, call->user_call_ID, call->state, call->conn); if (call->state >= RXRPC_CALL_COMPLETE) { /* it's too late for this call */ ret = -ESHUTDOWN; } else if (cmd == RXRPC_CMD_SEND_ABORT) { rxrpc_send_abort(call, abort_code); } else if (cmd != RXRPC_CMD_SEND_DATA) { ret = -EINVAL; } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) { /* request phase complete for this client call */ ret = -EPROTO; } else { ret = rxrpc_send_data(iocb, rx, call, msg, len); } rxrpc_put_call(call); _leave(" = %d", ret); return ret; } /** * rxrpc_kernel_send_data - Allow a kernel service to send data on a call * @call: The call to send data through * @msg: The data to send * @len: The amount of data to send * * Allow a kernel service to send data on a call. The call must be in an state * appropriate to sending data. No control data should be supplied in @msg, * nor should an address be supplied. MSG_MORE should be flagged if there's * more data to come, otherwise this data will end the transmission phase. 
*/ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg, size_t len) { int ret; _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]); ASSERTCMP(msg->msg_name, ==, NULL); ASSERTCMP(msg->msg_control, ==, NULL); lock_sock(&call->socket->sk); _debug("CALL %d USR %lx ST %d on CONN %p", call->debug_id, call->user_call_ID, call->state, call->conn); if (call->state >= RXRPC_CALL_COMPLETE) { ret = -ESHUTDOWN; /* it's too late for this call */ } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && call->state != RXRPC_CALL_SERVER_ACK_REQUEST && call->state != RXRPC_CALL_SERVER_SEND_REPLY) { ret = -EPROTO; /* request phase complete for this client call */ } else { mm_segment_t oldfs = get_fs(); set_fs(KERNEL_DS); ret = rxrpc_send_data(NULL, call->socket, call, msg, len); set_fs(oldfs); } release_sock(&call->socket->sk); _leave(" = %d", ret); return ret; } EXPORT_SYMBOL(rxrpc_kernel_send_data); /* * rxrpc_kernel_abort_call - Allow a kernel service to abort a call * @call: The call to be aborted * @abort_code: The abort code to stick into the ABORT packet * * Allow a kernel service to abort a call, if it's still in an abortable state. 
*/ void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code) { _enter("{%d},%d", call->debug_id, abort_code); lock_sock(&call->socket->sk); _debug("CALL %d USR %lx ST %d on CONN %p", call->debug_id, call->user_call_ID, call->state, call->conn); if (call->state < RXRPC_CALL_COMPLETE) rxrpc_send_abort(call, abort_code); release_sock(&call->socket->sk); _leave(""); } EXPORT_SYMBOL(rxrpc_kernel_abort_call); /* * send a message through a server socket * - caller holds the socket locked */ int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, struct msghdr *msg, size_t len) { enum rxrpc_command cmd; struct rxrpc_call *call; unsigned long user_call_ID = 0; u32 abort_code = 0; int ret; _enter(""); ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code, true); if (ret < 0) return ret; if (cmd == RXRPC_CMD_ACCEPT) { call = rxrpc_accept_call(rx, user_call_ID); if (IS_ERR(call)) return PTR_ERR(call); rxrpc_put_call(call); return 0; } call = rxrpc_find_server_call(rx, user_call_ID); if (!call) return -EBADSLT; if (call->state >= RXRPC_CALL_COMPLETE) { ret = -ESHUTDOWN; goto out; } switch (cmd) { case RXRPC_CMD_SEND_DATA: if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST && call->state != RXRPC_CALL_SERVER_ACK_REQUEST && call->state != RXRPC_CALL_SERVER_SEND_REPLY) { /* Tx phase not yet begun for this call */ ret = -EPROTO; break; } ret = rxrpc_send_data(iocb, rx, call, msg, len); break; case RXRPC_CMD_SEND_ABORT: rxrpc_send_abort(call, abort_code); break; default: BUG(); } out: rxrpc_put_call(call); _leave(" = %d", ret); return ret; } /* * send a packet through the transport endpoint */ int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb) { struct kvec iov[1]; struct msghdr msg; int ret, opt; _enter(",{%d}", skb->len); iov[0].iov_base = skb->head; iov[0].iov_len = skb->len; msg.msg_name = &trans->peer->srx.transport.sin; msg.msg_namelen = sizeof(trans->peer->srx.transport.sin); msg.msg_control = NULL; msg.msg_controllen 
= 0; msg.msg_flags = 0; /* send the packet with the don't fragment bit set if we currently * think it's small enough */ if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) { down_read(&trans->local->defrag_sem); /* send the packet by UDP * - returns -EMSGSIZE if UDP would have to fragment the packet * to go out of the interface * - in which case, we'll have processed the ICMP error * message and update the peer record */ ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, iov[0].iov_len); up_read(&trans->local->defrag_sem); if (ret == -EMSGSIZE) goto send_fragmentable; _leave(" = %d [%u]", ret, trans->peer->maxdata); return ret; } send_fragmentable: /* attempt to send this message with fragmentation enabled */ _debug("send fragment"); down_write(&trans->local->defrag_sem); opt = IP_PMTUDISC_DONT; ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER, (char *) &opt, sizeof(opt)); if (ret == 0) { ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1, iov[0].iov_len); opt = IP_PMTUDISC_DO; kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER, (char *) &opt, sizeof(opt)); } up_write(&trans->local->defrag_sem); _leave(" = %d [frag %u]", ret, trans->peer->maxdata); return ret; } /* * wait for space to appear in the transmit/ACK window * - caller holds the socket locked */ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx, struct rxrpc_call *call, long *timeo) { DECLARE_WAITQUEUE(myself, current); int ret; _enter(",{%d},%ld", CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz), *timeo); add_wait_queue(&call->tx_waitq, &myself); for (;;) { set_current_state(TASK_INTERRUPTIBLE); ret = 0; if (CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz) > 0) break; if (signal_pending(current)) { ret = sock_intr_errno(*timeo); break; } release_sock(&rx->sk); *timeo = schedule_timeout(*timeo); lock_sock(&rx->sk); } remove_wait_queue(&call->tx_waitq, &myself); set_current_state(TASK_RUNNING); _leave(" = %d", 
ret); return ret; } /* * attempt to schedule an instant Tx resend */ static inline void rxrpc_instant_resend(struct rxrpc_call *call) { read_lock_bh(&call->state_lock); if (try_to_del_timer_sync(&call->resend_timer) >= 0) { clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); if (call->state < RXRPC_CALL_COMPLETE && !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events)) rxrpc_queue_call(call); } read_unlock_bh(&call->state_lock); } /* * queue a packet for transmission, set the resend timer and attempt * to send the packet immediately */ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, bool last) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); int ret; _net("queue skb %p [%d]", skb, call->acks_head); ASSERT(call->acks_window != NULL); call->acks_window[call->acks_head] = (unsigned long) skb; smp_wmb(); call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1); if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) { _debug("________awaiting reply/ACK__________"); write_lock_bh(&call->state_lock); switch (call->state) { case RXRPC_CALL_CLIENT_SEND_REQUEST: call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; break; case RXRPC_CALL_SERVER_ACK_REQUEST: call->state = RXRPC_CALL_SERVER_SEND_REPLY; if (!last) break; case RXRPC_CALL_SERVER_SEND_REPLY: call->state = RXRPC_CALL_SERVER_AWAIT_ACK; break; default: break; } write_unlock_bh(&call->state_lock); } _proto("Tx DATA %%%u { #%u }", ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); sp->need_resend = false; sp->resend_at = jiffies + rxrpc_resend_timeout * HZ; if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) { _debug("run timer"); call->resend_timer.expires = sp->resend_at; add_timer(&call->resend_timer); } /* attempt to cancel the rx-ACK timer, deferring reply transmission if * we're ACK'ing the request phase of an incoming call */ ret = -EAGAIN; if (try_to_del_timer_sync(&call->ack_timer) >= 0) { /* the packet may be freed by rxrpc_process_call() before this * returns */ ret = 
rxrpc_send_packet(call->conn->trans, skb); _net("sent skb %p", skb); } else { _debug("failed to delete ACK timer"); } if (ret < 0) { _debug("need instant resend %d", ret); sp->need_resend = true; rxrpc_instant_resend(call); } _leave(""); } /* * send data through a socket * - must be called in process context * - caller holds the socket locked */ static int rxrpc_send_data(struct kiocb *iocb, struct rxrpc_sock *rx, struct rxrpc_call *call, struct msghdr *msg, size_t len) { struct rxrpc_skb_priv *sp; unsigned char __user *from; struct sk_buff *skb; struct iovec *iov; struct sock *sk = &rx->sk; long timeo; bool more; int ret, ioc, segment, copied; _enter(",,,{%zu},%zu", msg->msg_iovlen, len); timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); /* this should be in poll */ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) return -EPIPE; iov = msg->msg_iov; ioc = msg->msg_iovlen - 1; from = iov->iov_base; segment = iov->iov_len; iov++; more = msg->msg_flags & MSG_MORE; skb = call->tx_pending; call->tx_pending = NULL; copied = 0; do { int copy; if (segment > len) segment = len; _debug("SEGMENT %d @%p", segment, from); if (!skb) { size_t size, chunk, max, space; _debug("alloc"); if (CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz) <= 0) { ret = -EAGAIN; if (msg->msg_flags & MSG_DONTWAIT) goto maybe_error; ret = rxrpc_wait_for_tx_window(rx, call, &timeo); if (ret < 0) goto maybe_error; } max = call->conn->trans->peer->maxdata; max -= call->conn->security_size; max &= ~(call->conn->size_align - 1UL); chunk = max; if (chunk > len && !more) chunk = len; space = chunk + call->conn->size_align; space &= ~(call->conn->size_align - 1UL); size = space + call->conn->header_size; _debug("SIZE: %zu/%zu/%zu", chunk, space, size); /* create a buffer that we can retain until it's ACK'd */ skb = sock_alloc_send_skb( sk, size, msg->msg_flags & MSG_DONTWAIT, &ret); if (!skb) goto maybe_error; rxrpc_new_skb(skb); 
_debug("ALLOC SEND %p", skb); ASSERTCMP(skb->mark, ==, 0); _debug("HS: %u", call->conn->header_size); skb_reserve(skb, call->conn->header_size); skb->len += call->conn->header_size; sp = rxrpc_skb(skb); sp->remain = chunk; if (sp->remain > skb_tailroom(skb)) sp->remain = skb_tailroom(skb); _net("skb: hr %d, tr %d, hl %d, rm %d", skb_headroom(skb), skb_tailroom(skb), skb_headlen(skb), sp->remain); skb->ip_summed = CHECKSUM_UNNECESSARY; } _debug("append"); sp = rxrpc_skb(skb); /* append next segment of data to the current buffer */ copy = skb_tailroom(skb); ASSERTCMP(copy, >, 0); if (copy > segment) copy = segment; if (copy > sp->remain) copy = sp->remain; _debug("add"); ret = skb_add_data(skb, from, copy); _debug("added"); if (ret < 0) goto efault; sp->remain -= copy; skb->mark += copy; copied += copy; len -= copy; segment -= copy; from += copy; while (segment == 0 && ioc > 0) { from = iov->iov_base; segment = iov->iov_len; iov++; ioc--; } if (len == 0) { segment = 0; ioc = 0; } /* check for the far side aborting the call or a network error * occurring */ if (call->state > RXRPC_CALL_COMPLETE) goto call_aborted; /* add the packet to the send queue if it's now full */ if (sp->remain <= 0 || (segment == 0 && !more)) { struct rxrpc_connection *conn = call->conn; size_t pad; /* pad out if we're using security */ if (conn->security) { pad = conn->security_size + skb->mark; pad = conn->size_align - pad; pad &= conn->size_align - 1; _debug("pad %zu", pad); if (pad) memset(skb_put(skb, pad), 0, pad); } sp->hdr.epoch = conn->epoch; sp->hdr.cid = call->cid; sp->hdr.callNumber = call->call_id; sp->hdr.seq = htonl(atomic_inc_return(&call->sequence)); sp->hdr.serial = htonl(atomic_inc_return(&conn->serial)); sp->hdr.type = RXRPC_PACKET_TYPE_DATA; sp->hdr.userStatus = 0; sp->hdr.securityIndex = conn->security_ix; sp->hdr._rsvd = 0; sp->hdr.serviceId = conn->service_id; sp->hdr.flags = conn->out_clientflag; if (len == 0 && !more) sp->hdr.flags |= RXRPC_LAST_PACKET; else if 
(CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz) > 1) sp->hdr.flags |= RXRPC_MORE_PACKETS; ret = rxrpc_secure_packet( call, skb, skb->mark, skb->head + sizeof(struct rxrpc_header)); if (ret < 0) goto out; memcpy(skb->head, &sp->hdr, sizeof(struct rxrpc_header)); rxrpc_queue_packet(call, skb, segment == 0 && !more); skb = NULL; } } while (segment > 0); success: ret = copied; out: call->tx_pending = skb; _leave(" = %d", ret); return ret; call_aborted: rxrpc_free_skb(skb); if (call->state == RXRPC_CALL_NETWORK_ERROR) ret = call->conn->trans->peer->net_error; else ret = -ECONNABORTED; _leave(" = %d", ret); return ret; maybe_error: if (copied) goto success; goto out; efault: ret = -EFAULT; goto out; }
gpl-2.0
kamarush/Xperia-2011-4.1.B.0.431-Sources
arch/mips/txx9/generic/setup_tx4927.c
9302
9772
/* * TX4927 setup routines * Based on linux/arch/mips/txx9/rbtx4938/setup.c, * and RBTX49xx patch from CELF patch archive. * * 2003-2005 (c) MontaVista Software, Inc. * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007 * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/param.h> #include <linux/ptrace.h> #include <linux/mtd/physmap.h> #include <asm/reboot.h> #include <asm/traps.h> #include <asm/txx9irq.h> #include <asm/txx9tmr.h> #include <asm/txx9pio.h> #include <asm/txx9/generic.h> #include <asm/txx9/dmac.h> #include <asm/txx9/tx4927.h> static void __init tx4927_wdr_init(void) { /* report watchdog reset status */ if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDRST) pr_warning("Watchdog reset detected at 0x%lx\n", read_c0_errorepc()); /* clear WatchDogReset (W1C) */ tx4927_ccfg_set(TX4927_CCFG_WDRST); /* do reset on watchdog */ tx4927_ccfg_set(TX4927_CCFG_WR); } void __init tx4927_wdt_init(void) { txx9_wdt_init(TX4927_TMR_REG(2) & 0xfffffffffULL); } static void tx4927_machine_restart(char *command) { local_irq_disable(); pr_emerg("Rebooting (with %s watchdog reset)...\n", (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDREXEN) ? "external" : "internal"); /* clear watchdog status */ tx4927_ccfg_set(TX4927_CCFG_WDRST); /* W1C */ txx9_wdt_now(TX4927_TMR_REG(2) & 0xfffffffffULL); while (!(____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDRST)) ; mdelay(10); if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDREXEN) { pr_emerg("Rebooting (with internal watchdog reset)...\n"); /* External WDRST failed. 
Do internal watchdog reset */ tx4927_ccfg_clear(TX4927_CCFG_WDREXEN); } /* fallback */ (*_machine_halt)(); } void show_registers(struct pt_regs *regs); static int tx4927_be_handler(struct pt_regs *regs, int is_fixup) { int data = regs->cp0_cause & 4; console_verbose(); pr_err("%cBE exception at %#lx\n", data ? 'D' : 'I', regs->cp0_epc); pr_err("ccfg:%llx, toea:%llx\n", (unsigned long long)____raw_readq(&tx4927_ccfgptr->ccfg), (unsigned long long)____raw_readq(&tx4927_ccfgptr->toea)); #ifdef CONFIG_PCI tx4927_report_pcic_status(); #endif show_registers(regs); panic("BusError!"); } static void __init tx4927_be_init(void) { board_be_handler = tx4927_be_handler; } static struct resource tx4927_sdram_resource[4]; void __init tx4927_setup(void) { int i; __u32 divmode; unsigned int cpuclk = 0; u64 ccfg; txx9_reg_res_init(TX4927_REV_PCODE(), TX4927_REG_BASE, TX4927_REG_SIZE); set_c0_config(TX49_CONF_CWFON); /* SDRAMC,EBUSC are configured by PROM */ for (i = 0; i < 8; i++) { if (!(TX4927_EBUSC_CR(i) & 0x8)) continue; /* disabled */ txx9_ce_res[i].start = (unsigned long)TX4927_EBUSC_BA(i); txx9_ce_res[i].end = txx9_ce_res[i].start + TX4927_EBUSC_SIZE(i) - 1; request_resource(&iomem_resource, &txx9_ce_res[i]); } /* clocks */ ccfg = ____raw_readq(&tx4927_ccfgptr->ccfg); if (txx9_master_clock) { /* calculate gbus_clock and cpu_clock from master_clock */ divmode = (__u32)ccfg & TX4927_CCFG_DIVMODE_MASK; switch (divmode) { case TX4927_CCFG_DIVMODE_8: case TX4927_CCFG_DIVMODE_10: case TX4927_CCFG_DIVMODE_12: case TX4927_CCFG_DIVMODE_16: txx9_gbus_clock = txx9_master_clock * 4; break; default: txx9_gbus_clock = txx9_master_clock; } switch (divmode) { case TX4927_CCFG_DIVMODE_2: case TX4927_CCFG_DIVMODE_8: cpuclk = txx9_gbus_clock * 2; break; case TX4927_CCFG_DIVMODE_2_5: case TX4927_CCFG_DIVMODE_10: cpuclk = txx9_gbus_clock * 5 / 2; break; case TX4927_CCFG_DIVMODE_3: case TX4927_CCFG_DIVMODE_12: cpuclk = txx9_gbus_clock * 3; break; case TX4927_CCFG_DIVMODE_4: case 
TX4927_CCFG_DIVMODE_16: cpuclk = txx9_gbus_clock * 4; break; } txx9_cpu_clock = cpuclk; } else { if (txx9_cpu_clock == 0) txx9_cpu_clock = 200000000; /* 200MHz */ /* calculate gbus_clock and master_clock from cpu_clock */ cpuclk = txx9_cpu_clock; divmode = (__u32)ccfg & TX4927_CCFG_DIVMODE_MASK; switch (divmode) { case TX4927_CCFG_DIVMODE_2: case TX4927_CCFG_DIVMODE_8: txx9_gbus_clock = cpuclk / 2; break; case TX4927_CCFG_DIVMODE_2_5: case TX4927_CCFG_DIVMODE_10: txx9_gbus_clock = cpuclk * 2 / 5; break; case TX4927_CCFG_DIVMODE_3: case TX4927_CCFG_DIVMODE_12: txx9_gbus_clock = cpuclk / 3; break; case TX4927_CCFG_DIVMODE_4: case TX4927_CCFG_DIVMODE_16: txx9_gbus_clock = cpuclk / 4; break; } switch (divmode) { case TX4927_CCFG_DIVMODE_8: case TX4927_CCFG_DIVMODE_10: case TX4927_CCFG_DIVMODE_12: case TX4927_CCFG_DIVMODE_16: txx9_master_clock = txx9_gbus_clock / 4; break; default: txx9_master_clock = txx9_gbus_clock; } } /* change default value to udelay/mdelay take reasonable time */ loops_per_jiffy = txx9_cpu_clock / HZ / 2; /* CCFG */ tx4927_wdr_init(); /* clear BusErrorOnWrite flag (W1C) */ tx4927_ccfg_set(TX4927_CCFG_BEOW); /* enable Timeout BusError */ if (txx9_ccfg_toeon) tx4927_ccfg_set(TX4927_CCFG_TOE); /* DMA selection */ txx9_clear64(&tx4927_ccfgptr->pcfg, TX4927_PCFG_DMASEL_ALL); /* Use external clock for external arbiter */ if (!(____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCIARB)) txx9_clear64(&tx4927_ccfgptr->pcfg, TX4927_PCFG_PCICLKEN_ALL); printk(KERN_INFO "%s -- %dMHz(M%dMHz) CRIR:%08x CCFG:%llx PCFG:%llx\n", txx9_pcode_str, (cpuclk + 500000) / 1000000, (txx9_master_clock + 500000) / 1000000, (__u32)____raw_readq(&tx4927_ccfgptr->crir), (unsigned long long)____raw_readq(&tx4927_ccfgptr->ccfg), (unsigned long long)____raw_readq(&tx4927_ccfgptr->pcfg)); printk(KERN_INFO "%s SDRAMC --", txx9_pcode_str); for (i = 0; i < 4; i++) { __u64 cr = TX4927_SDRAMC_CR(i); unsigned long base, size; if (!((__u32)cr & 0x00000400)) continue; /* disabled */ base = 
(unsigned long)(cr >> 49) << 21; size = (((unsigned long)(cr >> 33) & 0x7fff) + 1) << 21; printk(" CR%d:%016llx", i, (unsigned long long)cr); tx4927_sdram_resource[i].name = "SDRAM"; tx4927_sdram_resource[i].start = base; tx4927_sdram_resource[i].end = base + size - 1; tx4927_sdram_resource[i].flags = IORESOURCE_MEM; request_resource(&iomem_resource, &tx4927_sdram_resource[i]); } printk(" TR:%09llx\n", (unsigned long long)____raw_readq(&tx4927_sdramcptr->tr)); /* TMR */ /* disable all timers */ for (i = 0; i < TX4927_NR_TMR; i++) txx9_tmr_init(TX4927_TMR_REG(i) & 0xfffffffffULL); /* PIO */ txx9_gpio_init(TX4927_PIO_REG & 0xfffffffffULL, 0, TX4927_NUM_PIO); __raw_writel(0, &tx4927_pioptr->maskcpu); __raw_writel(0, &tx4927_pioptr->maskext); _machine_restart = tx4927_machine_restart; board_be_init = tx4927_be_init; } void __init tx4927_time_init(unsigned int tmrnr) { if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_TINTDIS) txx9_clockevent_init(TX4927_TMR_REG(tmrnr) & 0xfffffffffULL, TXX9_IRQ_BASE + TX4927_IR_TMR(tmrnr), TXX9_IMCLK); } void __init tx4927_sio_init(unsigned int sclk, unsigned int cts_mask) { int i; for (i = 0; i < 2; i++) txx9_sio_init(TX4927_SIO_REG(i) & 0xfffffffffULL, TXX9_IRQ_BASE + TX4927_IR_SIO(i), i, sclk, (1 << i) & cts_mask); } void __init tx4927_mtd_init(int ch) { struct physmap_flash_data pdata = { .width = TX4927_EBUSC_WIDTH(ch) / 8, }; unsigned long start = txx9_ce_res[ch].start; unsigned long size = txx9_ce_res[ch].end - start + 1; if (!(TX4927_EBUSC_CR(ch) & 0x8)) return; /* disabled */ txx9_physmap_flash_init(ch, start, size, &pdata); } void __init tx4927_dmac_init(int memcpy_chan) { struct txx9dmac_platform_data plat_data = { .memcpy_chan = memcpy_chan, .have_64bit_regs = true, }; txx9_dmac_init(0, TX4927_DMA_REG & 0xfffffffffULL, TXX9_IRQ_BASE + TX4927_IR_DMA(0), &plat_data); } void __init tx4927_aclc_init(unsigned int dma_chan_out, unsigned int dma_chan_in) { u64 pcfg = __raw_readq(&tx4927_ccfgptr->pcfg); __u64 dmasel_mask = 0, 
dmasel = 0; unsigned long flags; if (!(pcfg & TX4927_PCFG_SEL2)) return; /* setup DMASEL (playback:ACLC ch0, capture:ACLC ch1) */ switch (dma_chan_out) { case 0: dmasel_mask |= TX4927_PCFG_DMASEL0_MASK; dmasel |= TX4927_PCFG_DMASEL0_ACL0; break; case 2: dmasel_mask |= TX4927_PCFG_DMASEL2_MASK; dmasel |= TX4927_PCFG_DMASEL2_ACL0; break; default: return; } switch (dma_chan_in) { case 1: dmasel_mask |= TX4927_PCFG_DMASEL1_MASK; dmasel |= TX4927_PCFG_DMASEL1_ACL1; break; case 3: dmasel_mask |= TX4927_PCFG_DMASEL3_MASK; dmasel |= TX4927_PCFG_DMASEL3_ACL1; break; default: return; } local_irq_save(flags); txx9_clear64(&tx4927_ccfgptr->pcfg, dmasel_mask); txx9_set64(&tx4927_ccfgptr->pcfg, dmasel); local_irq_restore(flags); txx9_aclc_init(TX4927_ACLC_REG & 0xfffffffffULL, TXX9_IRQ_BASE + TX4927_IR_ACLC, 0, dma_chan_out, dma_chan_in); } static void __init tx4927_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; char buf[128]; buf[0] = '\0'; local_irq_disable(); pcfg = ____raw_readq(&tx4927_ccfgptr->pcfg); if (!(pcfg & TX4927_PCFG_SEL2)) { rst |= TX4927_CLKCTR_ACLRST; ckd |= TX4927_CLKCTR_ACLCKD; strcat(buf, " ACLC"); } if (rst | ckd) { txx9_set64(&tx4927_ccfgptr->clkctr, rst); txx9_set64(&tx4927_ccfgptr->clkctr, ckd); } local_irq_enable(); if (buf[0]) pr_info("%s: stop%s\n", txx9_pcode_str, buf); } static int __init tx4927_late_init(void) { if (txx9_pcode != 0x4927) return -ENODEV; tx4927_stop_unused_modules(); return 0; } late_initcall(tx4927_late_init);
gpl-2.0
dmitriy103/amethyst-bravo-kernel
arch/mips/sni/eisa.c
9302
1176
/* * Virtual EISA root driver. * Acts as a placeholder if we don't have a proper EISA bridge. * * (C) 2003 Marc Zyngier <maz@wild-wind.fr.eu.org> * modified for SNI usage by Thomas Bogendoerfer * * This code is released under the GPL version 2. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/eisa.h> #include <linux/init.h> /* The default EISA device parent (virtual root device). * Now use a platform device, since that's the obvious choice. */ static struct platform_device eisa_root_dev = { .name = "eisa", .id = 0, }; static struct eisa_root_device eisa_bus_root = { .dev = &eisa_root_dev.dev, .bus_base_addr = 0, .res = &ioport_resource, .slots = EISA_MAX_SLOTS, .dma_mask = 0xffffffff, .force_probe = 1, }; int __init sni_eisa_root_init(void) { int r; r = platform_device_register(&eisa_root_dev); if (!r) return r; dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root); if (eisa_root_register(&eisa_bus_root)) { /* A real bridge may have been registered before * us. So quietly unregister. */ platform_device_unregister(&eisa_root_dev); return -1; } return 0; }
gpl-2.0
derlorenz/sony_msm8974ab
fs/hfs/trans.c
11862
3442
/* * linux/fs/hfs/trans.c * * Copyright (C) 1995-1997 Paul H. Hargrove * This file may be distributed under the terms of the GNU General Public License. * * This file contains routines for converting between the Macintosh * character set and various other encodings. This includes dealing * with ':' vs. '/' as the path-element separator. */ #include <linux/types.h> #include <linux/nls.h> #include "hfs_fs.h" /*================ Global functions ================*/ /* * hfs_mac2asc() * * Given a 'Pascal String' (a string preceded by a length byte) in * the Macintosh character set produce the corresponding filename using * the 'trivial' name-mangling scheme, returning the length of the * mangled filename. Note that the output string is not NULL * terminated. * * The name-mangling works as follows: * The character '/', which is illegal in Linux filenames is replaced * by ':' which never appears in HFS filenames. All other characters * are passed unchanged from input to output. */ int hfs_mac2asc(struct super_block *sb, char *out, const struct hfs_name *in) { struct nls_table *nls_disk = HFS_SB(sb)->nls_disk; struct nls_table *nls_io = HFS_SB(sb)->nls_io; const char *src; char *dst; int srclen, dstlen, size; src = in->name; srclen = in->len; if (srclen > HFS_NAMELEN) srclen = HFS_NAMELEN; dst = out; dstlen = HFS_MAX_NAMELEN; if (nls_io) { wchar_t ch; while (srclen > 0) { if (nls_disk) { size = nls_disk->char2uni(src, srclen, &ch); if (size <= 0) { ch = '?'; size = 1; } src += size; srclen -= size; } else { ch = *src++; srclen--; } if (ch == '/') ch = ':'; size = nls_io->uni2char(ch, dst, dstlen); if (size < 0) { if (size == -ENAMETOOLONG) goto out; *dst = '?'; size = 1; } dst += size; dstlen -= size; } } else { char ch; while (--srclen >= 0) *dst++ = (ch = *src++) == '/' ? 
':' : ch; } out: return dst - out; } /* * hfs_asc2mac() * * Given an ASCII string (not null-terminated) and its length, * generate the corresponding filename in the Macintosh character set * using the 'trivial' name-mangling scheme, returning the length of * the mangled filename. Note that the output string is not NULL * terminated. * * This routine is a inverse to hfs_mac2triv(). * A ':' is replaced by a '/'. */ void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, struct qstr *in) { struct nls_table *nls_disk = HFS_SB(sb)->nls_disk; struct nls_table *nls_io = HFS_SB(sb)->nls_io; const char *src; char *dst; int srclen, dstlen, size; src = in->name; srclen = in->len; dst = out->name; dstlen = HFS_NAMELEN; if (nls_io) { wchar_t ch; while (srclen > 0) { size = nls_io->char2uni(src, srclen, &ch); if (size < 0) { ch = '?'; size = 1; } src += size; srclen -= size; if (ch == ':') ch = '/'; if (nls_disk) { size = nls_disk->uni2char(ch, dst, dstlen); if (size < 0) { if (size == -ENAMETOOLONG) goto out; *dst = '?'; size = 1; } dst += size; dstlen -= size; } else { *dst++ = ch > 0xff ? '?' : ch; dstlen--; } } } else { char ch; if (dstlen > srclen) dstlen = srclen; while (--dstlen >= 0) *dst++ = (ch = *src++) == ':' ? '/' : ch; } out: out->len = dst - (char *)out->name; dstlen = HFS_NAMELEN - out->len; while (--dstlen >= 0) *dst++ = 0; }
gpl-2.0
hallovveen31/HELLRAZOR
arch/arm/mach-msm/qdsp5/audio_evrc_in.c
87
38875
/* arch/arm/mach-msm/qdsp5/audio_evrc_in.c * * evrc audio input device * * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This code is based in part on arch/arm/mach-msm/qdsp5v2/audio_evrc_in.c, * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/uaccess.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/msm_audio_qcp.h> #include <linux/memory_alloc.h> #include <linux/ion.h> #include <asm/atomic.h> #include <asm/ioctls.h> #include <mach/msm_memtypes.h> #include <mach/msm_adsp.h> #include <mach/msm_rpcrouter.h> #include <mach/iommu.h> #include <mach/iommu_domains.h> #include <mach/msm_subsystem_map.h> #include "audmgr.h" #include <mach/qdsp5/qdsp5audpreproc.h> #include <mach/qdsp5/qdsp5audpreproccmdi.h> #include <mach/qdsp5/qdsp5audpreprocmsg.h> #include <mach/qdsp5/qdsp5audreccmdi.h> #include <mach/qdsp5/qdsp5audrecmsg.h> #include <mach/debug_mm.h> #define FRAME_HEADER_SIZE 8 /* 8 bytes frame header */ #define NT_FRAME_HEADER_SIZE 24 /* 24 bytes frame header */ /* FRAME_NUM must be a power of two */ #define FRAME_NUM 8 #define EVRC_FRAME_SIZE 36 /* 36 bytes data */ /*Tunnel mode : 36 bytes data + 8 byte header*/ #define FRAME_SIZE (EVRC_FRAME_SIZE + FRAME_HEADER_SIZE) /* 36 bytes data + 24 meta field*/ #define NT_FRAME_SIZE (EVRC_FRAME_SIZE + NT_FRAME_HEADER_SIZE) #define DMASZ (FRAME_SIZE * FRAME_NUM) #define 
NT_DMASZ (NT_FRAME_SIZE * FRAME_NUM)

/* Non-tunnel (NT) write path: two ping-pong PCM input buffers from userspace */
#define OUT_FRAME_NUM 2
#define OUT_BUFFER_SIZE (4 * 1024 + NT_FRAME_HEADER_SIZE)
#define BUFFER_SIZE (OUT_BUFFER_SIZE * OUT_FRAME_NUM)

#define AUDPREPROC_EVRC_EOS_FLG_OFFSET 0x0A /* Offset from beginning of buffer*/
#define AUDPREPROC_EVRC_EOS_FLG_MASK 0x01
#define AUDPREPROC_EVRC_EOS_NONE 0x0 /* No EOS detected */
#define AUDPREPROC_EVRC_EOS_SET 0x1 /* EOS set in meta field */

/* One frame buffer shared with the DSP (read path) or userspace (write path) */
struct buffer {
	void *data;		/* kernel virtual address of payload */
	uint32_t size;		/* bytes of valid data in this buffer */
	uint32_t read;		/* bytes already consumed by read() */
	uint32_t addr;		/* physical/bus address handed to the DSP */
	uint32_t used;		/* write path: byte count, or 0xffffffff while in-flight */
	uint32_t mfield_sz;	/* size of meta field prepended to the data */
};

/* Per-session state for one EVRC recording stream (driver is single-open) */
struct audio_evrc_in {
	struct buffer in[FRAME_NUM];	/* encoded frames from the DSP */
	spinlock_t dsp_lock;		/* protects in/out ring indices vs. DSP events */
	atomic_t in_bytes;		/* statistics reported by AUDIO_GET_STATS */
	atomic_t in_samples;
	struct mutex lock;		/* serializes open/release/ioctl */
	struct mutex read_lock;
	wait_queue_head_t wait;		/* readers wait here for frames */
	wait_queue_head_t wait_enable;	/* waits for DSP running-state transitions */
	/*write section*/
	struct buffer out[OUT_FRAME_NUM];
	uint8_t out_head;
	uint8_t out_tail;
	uint8_t out_needed; /* number of buffers the dsp is waiting for */
	uint32_t out_count;
	struct mutex write_lock;
	wait_queue_head_t write_wait;
	int32_t out_phys; /* physical address of write buffer */
	char *out_data;
	int mfield; /* meta field embedded in data */
	int wflush; /*write flush */
	int rflush; /*read flush*/
	int out_frame_cnt;
	struct msm_adsp_module *audrec;
	struct msm_adsp_module *audpre;
	/* configuration to use on next enable */
	uint32_t samp_rate;
	uint32_t channel_mode;
	uint32_t buffer_size; /* Frame size (36 bytes) */
	uint32_t enc_type; /* 11 for EVRC */
	uint32_t mode; /* T or NT Mode*/
	struct msm_audio_evrc_enc_config cfg;
	uint32_t dsp_cnt;
	uint32_t in_head; /* next buffer dsp will write */
	uint32_t in_tail; /* next buffer read() will read */
	uint32_t in_count; /* number of buffers available to read() */
	uint32_t eos_ack;
	uint32_t flush_ack;
	const char *module_name;
	unsigned queue_ids;
	uint16_t enc_id; /* Session Id */
	unsigned short samp_rate_index;
	uint32_t audrec_obj_idx ;
	struct audmgr audmgr;
	/* data allocated for various buffers */
	char *data;
	dma_addr_t phys;
	void *map_v_read;
	void *map_v_write;
	int opened;
	int enabled;
	int running;
	int stopped; /* set when stopped, cleared on flush */
	struct ion_client *client;
	struct ion_handle *input_buff_handle;
	struct ion_handle *output_buff_handle;
};

/* Header the DSP prepends to each encoded frame in tunnel mode */
struct audio_frame {
	uint16_t frame_count_lsw;
	uint16_t frame_count_msw;
	uint16_t frame_length;
	uint16_t erased_pcm;
	unsigned char raw_bitstream[];
} __packed;

/* Header the DSP prepends to each encoded frame in non-tunnel mode */
struct audio_frame_nt {
	uint16_t metadata_len;
	uint16_t frame_count_lsw;
	uint16_t frame_count_msw;
	uint16_t frame_length;
	uint16_t erased_pcm;
	uint16_t reserved;
	uint16_t time_stamp_dword_lsw;
	uint16_t time_stamp_dword_msw;
	uint16_t time_stamp_lsw;
	uint16_t time_stamp_msw;
	uint16_t nflag_lsw;
	uint16_t nflag_msw;
	unsigned char raw_bitstream[]; /* samples */
} __packed;

/* Meta block copied to userspace ahead of each NT-mode frame in read() */
struct evrc_encoded_meta_out {
	uint16_t metadata_len;
	uint16_t time_stamp_dword_lsw;
	uint16_t time_stamp_dword_msw;
	uint16_t time_stamp_lsw;
	uint16_t time_stamp_msw;
	uint16_t nflag_lsw;
	uint16_t nflag_msw;
};

/* Audrec Queue command sent macro's.
 * queue_ids packs two queue ids: high halfword = BS queue, low = rec queue.
 */
#define audio_send_queue_pre(audio, cmd, len) \
	msm_adsp_write(audio->audpre, QDSP_uPAudPreProcCmdQueue, cmd, len)
#define audio_send_queue_recbs(audio, cmd, len) \
	msm_adsp_write(audio->audrec, ((audio->queue_ids & 0xFFFF0000) >> 16),\
			cmd, len)
#define audio_send_queue_rec(audio, cmd, len) \
	msm_adsp_write(audio->audrec, (audio->queue_ids & 0x0000FFFF),\
			cmd, len)

static int audevrc_in_dsp_enable(struct audio_evrc_in *audio, int enable);
static int audevrc_in_encparam_config(struct audio_evrc_in *audio);
static int audevrc_in_encmem_config(struct audio_evrc_in *audio);
static int audevrc_in_dsp_read_buffer(struct audio_evrc_in *audio,
				uint32_t read_cnt);
static void audevrc_in_flush(struct audio_evrc_in *audio);
static void audevrc_in_get_dsp_frames(struct audio_evrc_in *audio);
static int audpcm_config(struct audio_evrc_in *audio);
static void audevrc_out_flush(struct audio_evrc_in *audio);
static int audevrc_in_routing_mode_config(struct audio_evrc_in *audio);
static void audrec_pcm_send_data(struct audio_evrc_in *audio, unsigned needed);
static void audevrc_nt_in_get_dsp_frames(struct audio_evrc_in *audio);
/* NOTE(review): duplicate forward declaration (already declared above) */
static void audevrc_in_flush(struct audio_evrc_in *audio);

/* Map an RPC sample-rate index to its rate in Hz (defaults to 11025). */
static unsigned convert_samp_index(unsigned index)
{
	switch (index) {
	case RPC_AUD_DEF_SAMPLE_RATE_48000:	return 48000;
	case RPC_AUD_DEF_SAMPLE_RATE_44100:	return 44100;
	case RPC_AUD_DEF_SAMPLE_RATE_32000:	return 32000;
	case RPC_AUD_DEF_SAMPLE_RATE_24000:	return 24000;
	case RPC_AUD_DEF_SAMPLE_RATE_22050:	return 22050;
	case RPC_AUD_DEF_SAMPLE_RATE_16000:	return 16000;
	case RPC_AUD_DEF_SAMPLE_RATE_12000:	return 12000;
	case RPC_AUD_DEF_SAMPLE_RATE_11025:	return 11025;
	case RPC_AUD_DEF_SAMPLE_RATE_8000:	return 8000;
	default:				return 11025;
	}
}

/* Bring up audmgr (tunnel mode only) and the ADSP modules, then ask the
 * DSP to start encoding.  Returns 0 on success or a negative errno.
 * must be called with audio->lock held
 */
static int audevrc_in_enable(struct audio_evrc_in *audio)
{
	struct audmgr_config cfg;
	int rc;

	if (audio->enabled)
		return 0;

	cfg.tx_rate = audio->samp_rate;
	cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE;
	cfg.def_method = RPC_AUD_DEF_METHOD_RECORD;
	cfg.codec = RPC_AUD_DEF_CODEC_EVRC;
	cfg.snd_method = RPC_SND_METHOD_MIDI;

	/* audpre/audmgr are only used on the tunnel (device-sourced) path */
	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
		rc = audmgr_enable(&audio->audmgr, &cfg);
		if (rc < 0)
			return rc;

		if (msm_adsp_enable(audio->audpre)) {
			audmgr_disable(&audio->audmgr);
			MM_ERR("msm_adsp_enable(audpre) failed\n");
			return -ENODEV;
		}
	}
	if (msm_adsp_enable(audio->audrec)) {
		/* unwind the tunnel-mode resources acquired above */
		if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
			audmgr_disable(&audio->audmgr);
			msm_adsp_disable(audio->audpre);
		}
		MM_ERR("msm_adsp_enable(audrec) failed\n");
		return -ENODEV;
	}
	audio->enabled = 1;
	audevrc_in_dsp_enable(audio, 1);
	return 0;
}

/* Stop the encoder, wait (bounded) for the DSP to report idle, then shut
 * down the ADSP modules and audmgr.
 * must be called with audio->lock held
 */
static int audevrc_in_disable(struct audio_evrc_in *audio)
{
	if (audio->enabled) {
		audio->enabled = 0;
		audevrc_in_dsp_enable(audio, 0);
		wake_up(&audio->wait);
		/* wait up to 1s for the CFG SLEEP ack to clear running */
		wait_event_interruptible_timeout(audio->wait_enable,
				audio->running == 0, 1*HZ);
		msm_adsp_disable(audio->audrec);
		if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
			msm_adsp_disable(audio->audpre);
			audmgr_disable(&audio->audmgr);
		}
	}
	return 0;
}

/* ------------------- dsp --------------------- */
/* Event callback for the audpreproc task; all events are informational. */
static void audpre_dsp_event(void *data, unsigned id, size_t len,
			    void (*getevent)(void *ptr, size_t len))
{
	uint16_t msg[2];
	getevent(msg, sizeof(msg));
	switch (id) {
	case AUDPREPROC_MSG_CMD_CFG_DONE_MSG:
		MM_DBG("type %d, status_flag %d\n", msg[0], msg[1]);
		break;
	case AUDPREPROC_MSG_ERROR_MSG_ID:
		MM_ERR("err_index %d\n", msg[0]);
		break;
	case ADSP_MESSAGE_ID:
		MM_DBG("Received ADSP event: module enable(audpreproctask)\n");
		break;
	default:
		MM_ERR("unknown event %d\n", id);
	}
}

/* Tunnel mode: account a newly encoded frame at in_head, advance the ring
 * (dropping the oldest frame on overflow) and re-arm the DSP read pointer.
 * Runs from the audrec event callback.
 */
static void audevrc_in_get_dsp_frames(struct audio_evrc_in *audio)
{
	struct audio_frame *frame;
	uint32_t index;
	unsigned long flags;

	index = audio->in_head;
	/* frame header sits immediately before the payload pointer */
	frame = (void *) (((char *)audio->in[index].data) - sizeof(*frame));
	spin_lock_irqsave(&audio->dsp_lock, flags);
	audio->in[index].size = frame->frame_length;

	/* statistics of read */
	atomic_add(audio->in[index].size, &audio->in_bytes);
	atomic_add(1, &audio->in_samples);

	audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1);

	/* If overflow, move the tail index forward. */
	if (audio->in_head == audio->in_tail) {
		MM_ERR("Error! not able to keep up the read\n");
		audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1);
		MM_ERR("in_count = %d\n", audio->in_count);
	} else
		audio->in_count++;

	audevrc_in_dsp_read_buffer(audio, audio->dsp_cnt++);
	spin_unlock_irqrestore(&audio->dsp_lock, flags);

	wake_up(&audio->wait);
}

/* Non-tunnel mode: same accounting as above, but using the NT frame header
 * and without re-arming the DSP (read() does that after consuming data).
 */
static void audevrc_nt_in_get_dsp_frames(struct audio_evrc_in *audio)
{
	struct audio_frame_nt *nt_frame;
	uint32_t index;
	unsigned long flags;

	index = audio->in_head;
	nt_frame = (void *) (((char *)audio->in[index].data) - \
				sizeof(struct audio_frame_nt));
	spin_lock_irqsave(&audio->dsp_lock, flags);
	audio->in[index].size = nt_frame->frame_length;

	/* statistics of read */
	atomic_add(audio->in[index].size, &audio->in_bytes);
	atomic_add(1, &audio->in_samples);

	audio->in_head = (audio->in_head + 1) & (FRAME_NUM - 1);

	/* If overflow, move the tail index forward.
*/
	if (audio->in_head == audio->in_tail)
		MM_DBG("Error! not able to keep up the read\n");
	else
		audio->in_count++;

	spin_unlock_irqrestore(&audio->dsp_lock, flags);
	wake_up(&audio->wait);
}

/* NT mode: hand PCM buffer idx (used for len bytes) to the encoder.
 * Length is converted to halfwords; a header-only buffer carries EOS.
 */
static int audrec_pcm_buffer_ptr_refresh(struct audio_evrc_in *audio,
				       unsigned idx, unsigned len)
{
	struct audrec_cmd_pcm_buffer_ptr_refresh_arm_enc cmd;

	/* DSP expects the length in 16-bit words, header included */
	if (len == NT_FRAME_HEADER_SIZE)
		len = len / 2;
	else
		len = (len + NT_FRAME_HEADER_SIZE) / 2;
	MM_DBG("len = %d\n", len);
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_PCM_BUFFER_PTR_REFRESH_ARM_TO_ENC;
	cmd.num_buffers = 1;
	if (cmd.num_buffers == 1) {
		cmd.buf_address_length[0] = (audio->out[idx].addr &
							0xffff0000) >> 16;
		cmd.buf_address_length[1] = (audio->out[idx].addr &
							0x0000ffff);
		cmd.buf_address_length[2] = (len & 0xffff0000) >> 16;
		cmd.buf_address_length[3] = (len & 0x0000ffff);
	}
	audio->out_frame_cnt++;
	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* NT mode: describe the incoming PCM stream (rate/channels) to the DSP. */
static int audpcm_config(struct audio_evrc_in *audio)
{
	struct audrec_cmd_pcm_cfg_arm_to_enc cmd;
	MM_DBG("\n");
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_PCM_CFG_ARM_TO_ENC;
	cmd.config_update_flag = AUDREC_PCM_CONFIG_UPDATE_FLAG_ENABLE;
	cmd.enable_flag = AUDREC_ENABLE_FLAG_VALUE;
	cmd.sampling_freq = convert_samp_index(audio->samp_rate);
	if (!audio->channel_mode)
		cmd.channels = 1;
	else
		cmd.channels = 2;
	cmd.frequency_of_intimation = 1;
	cmd.max_number_of_buffers = OUT_FRAME_NUM;
	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Select tunnel (0, default) or non-tunnel (1) routing inside the DSP. */
static int audevrc_in_routing_mode_config(struct audio_evrc_in *audio)
{
	struct audrec_cmd_routing_mode cmd;

	MM_DBG("\n");
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_ROUTING_MODE;
	if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL)
		cmd.routing_mode = 1;
	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Event callback for the audrec task: drives the configuration state
 * machine (enc cfg -> routing -> mem cfg -> pcm cfg -> param cfg) and
 * dispatches packet-ready / flush-done notifications.
 */
static void audrec_dsp_event(void *data, unsigned id, size_t len,
			    void (*getevent)(void *ptr, size_t len))
{
	struct audio_evrc_in *audio = NULL;

	if (data)
		audio = data;
	else {
		MM_ERR("invalid data for event %x\n", id);
		return;
	}

	switch (id) {
	case AUDREC_MSG_CMD_CFG_DONE_MSG: {
		struct audrec_msg_cmd_cfg_done_msg cmd_cfg_done_msg;
		getevent(&cmd_cfg_done_msg, AUDREC_MSG_CMD_CFG_DONE_MSG_LEN);
		if (cmd_cfg_done_msg.audrec_enc_type & \
				AUDREC_MSG_CFG_DONE_ENC_ENA) {
			audio->audrec_obj_idx = cmd_cfg_done_msg.audrec_obj_idx;
			MM_DBG("CFG ENABLED\n");
			if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) {
				MM_DBG("routing command\n");
				audevrc_in_routing_mode_config(audio);
			} else {
				audevrc_in_encmem_config(audio);
			}
		} else {
			/* encoder disabled: release anyone waiting in disable() */
			MM_DBG("CFG SLEEP\n");
			audio->running = 0;
			wake_up(&audio->wait_enable);
		}
		break;
	}
	case AUDREC_MSG_CMD_ROUTING_MODE_DONE_MSG: {
		struct audrec_msg_cmd_routing_mode_done_msg routing_msg;

		getevent(&routing_msg, AUDREC_MSG_CMD_ROUTING_MODE_DONE_MSG);
		MM_DBG("AUDREC_MSG_CMD_ROUTING_MODE_DONE_MSG");
		if (routing_msg.configuration == 0) {
			MM_ERR("routing configuration failed\n");
			audio->running = 0;
			wake_up(&audio->wait_enable);
		} else
			audevrc_in_encmem_config(audio);
		break;
	}
	case AUDREC_MSG_CMD_AREC_MEM_CFG_DONE_MSG: {
		MM_DBG("AREC_MEM_CFG_DONE_MSG\n");
		if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)
			audevrc_in_encparam_config(audio);
		else
			audpcm_config(audio);
		break;
	}
	case AUDREC_CMD_PCM_CFG_ARM_TO_ENC_DONE_MSG: {
		MM_DBG("AUDREC_CMD_PCM_CFG_ARM_TO_ENC_DONE_MSG");
		audevrc_in_encparam_config(audio);
		break;
	}
	case AUDREC_MSG_CMD_AREC_PARAM_CFG_DONE_MSG: {
		/* configuration complete: encoder is running */
		MM_DBG("AUDREC_MSG_CMD_AREC_PARAM_CFG_DONE_MSG\n");
		audio->running = 1;
		wake_up(&audio->wait_enable);
		if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL)
			audrec_pcm_send_data(audio, 1);
		break;
	}
	case AUDREC_CMD_PCM_BUFFER_PTR_UPDATE_ARM_TO_ENC_MSG: {
		MM_DBG("ptr_update recieved from DSP\n");
		audrec_pcm_send_data(audio, 1);
		break;
	}
	case AUDREC_MSG_NO_EXT_PKT_AVAILABLE_MSG: {
		struct audrec_msg_no_ext_pkt_avail_msg err_msg;
		getevent(&err_msg, AUDREC_MSG_NO_EXT_PKT_AVAILABLE_MSG_LEN);
		MM_DBG("NO_EXT_PKT_AVAILABLE_MSG %x\n",\
			err_msg.audrec_err_id);
		break;
	}
	case AUDREC_MSG_PACKET_READY_MSG: {
		struct audrec_msg_packet_ready_msg
pkt_ready_msg;

		getevent(&pkt_ready_msg, AUDREC_MSG_PACKET_READY_MSG_LEN);
		MM_DBG("UP_PACKET_READY_MSG: write cnt msw %d \
		write cnt lsw %d read cnt msw %d read cnt lsw %d \n",\
		pkt_ready_msg.pkt_counter_msw, \
		pkt_ready_msg.pkt_counter_lsw, \
		pkt_ready_msg.pkt_read_cnt_msw, \
		pkt_ready_msg.pkt_read_cnt_lsw);

		audevrc_in_get_dsp_frames(audio);
		break;
	}
	case AUDREC_UP_NT_PACKET_READY_MSG: {
		struct audrec_up_nt_packet_ready_msg pkt_ready_msg;

		getevent(&pkt_ready_msg, AUDREC_UP_NT_PACKET_READY_MSG_LEN);
		MM_DBG("UP_NT_PACKET_READY_MSG: write cnt lsw %d \
		write cnt msw %d read cnt lsw %d read cnt msw %d \n",\
		pkt_ready_msg.audrec_packetwrite_cnt_lsw, \
		pkt_ready_msg.audrec_packetwrite_cnt_msw, \
		pkt_ready_msg.audrec_upprev_readcount_lsw, \
		pkt_ready_msg.audrec_upprev_readcount_msw);

		audevrc_nt_in_get_dsp_frames(audio);
		break;
	}
	case AUDREC_CMD_FLUSH_DONE_MSG: {
		/* flush completed: clear flush flags and release writers */
		audio->wflush = 0;
		audio->rflush = 0;
		audio->flush_ack = 1;
		wake_up(&audio->write_wait);
		MM_DBG("flush ack recieved\n");
		break;
	}
	case ADSP_MESSAGE_ID:
		MM_DBG("Received ADSP event: module \
				enable/disable(audrectask)\n");
		break;
	default:
		MM_ERR("unknown event %d\n", id);
	}
}

static struct msm_adsp_ops audpre_evrc_adsp_ops = {
	.event = audpre_dsp_event,
};

static struct msm_adsp_ops audrec_evrc_adsp_ops = {
	.event = audrec_dsp_event,
};

/* Send AUDREC_CMD_ENC_CFG to enable (1) or disable (0) the EVRC encoder. */
static int audevrc_in_dsp_enable(struct audio_evrc_in *audio, int enable)
{
	struct audrec_cmd_enc_cfg cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_ENC_CFG;
	cmd.audrec_enc_type = (audio->enc_type & 0xFF) |
	(enable ? AUDREC_CMD_ENC_ENA : AUDREC_CMD_ENC_DIS);
	/* Don't care */
	cmd.audrec_obj_idx = audio->audrec_obj_idx;

	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Describe the shared frame-buffer memory to the DSP and carve audio->data
 * into FRAME_NUM per-frame payload pointers (header space reserved before
 * each payload).
 */
static int audevrc_in_encmem_config(struct audio_evrc_in *audio)
{
	struct audrec_cmd_arecmem_cfg cmd;
	uint16_t *data = (void *) audio->data;
	int n;
	int header_len = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_ARECMEM_CFG;
	cmd.audrec_obj_idx = audio->audrec_obj_idx;
	/* Rate at which packet complete message comes */
	cmd.audrec_up_pkt_intm_cnt = 1;
	cmd.audrec_extpkt_buffer_msw = audio->phys >> 16;
	cmd.audrec_extpkt_buffer_lsw = audio->phys;
	/* Max Buffer no available for frames */
	cmd.audrec_extpkt_buffer_num = FRAME_NUM;

	/* prepare buffer pointers:
	 * T:36 bytes evrc packet + 4 halfword header
	 * NT:36 bytes evrc packet + 12 halfword header
	 */
	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)
		header_len = FRAME_HEADER_SIZE/2;
	else
		header_len = NT_FRAME_HEADER_SIZE/2;

	for (n = 0; n < FRAME_NUM; n++) {
		audio->in[n].data = data + header_len;
		data += (EVRC_FRAME_SIZE/2) + header_len;
		MM_DBG("0x%8x\n", (int)(audio->in[n].data - header_len*2));
	}

	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Program the encoder's min/max CDMA bit rates from the cached config. */
static int audevrc_in_encparam_config(struct audio_evrc_in *audio)
{
	struct audrec_cmd_arecparam_evrc_cfg cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.common.cmd_id = AUDREC_CMD_ARECPARAM_CFG;
	cmd.common.audrec_obj_idx = audio->audrec_obj_idx;
	cmd.enc_min_rate = audio->cfg.min_bit_rate;
	cmd.enc_max_rate = audio->cfg.max_bit_rate;
	cmd.rate_modulation_cmd = 0; /* Default set to 0 */

	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Ask the DSP to flush its pipeline; completion arrives as
 * AUDREC_CMD_FLUSH_DONE_MSG.
 */
static int audevrc_flush_command(struct audio_evrc_in *audio)
{
	struct audrec_cmd_flush cmd;
	MM_DBG("\n");
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDREC_CMD_FLUSH;
	return audio_send_queue_rec(audio, &cmd, sizeof(cmd));
}

/* Acknowledge consumption of frames up to read_cnt so the DSP can reuse
 * the external packet buffers (sent on the BS queue).
 */
static int audevrc_in_dsp_read_buffer(struct audio_evrc_in *audio,
		uint32_t read_cnt)
{
	audrec_cmd_packet_ext_ptr cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id =
AUDREC_CMD_PACKET_EXT_PTR; cmd.type = audio->audrec_obj_idx; cmd.curr_rec_count_msw = read_cnt >> 16; cmd.curr_rec_count_lsw = read_cnt; return audio_send_queue_recbs(audio, &cmd, sizeof(cmd)); } /* ------------------- device --------------------- */ static void audevrc_ioport_reset(struct audio_evrc_in *audio) { /* Make sure read/write thread are free from * sleep and knowing that system is not able * to process io request at the moment */ wake_up(&audio->write_wait); mutex_lock(&audio->write_lock); audevrc_in_flush(audio); mutex_unlock(&audio->write_lock); wake_up(&audio->wait); mutex_lock(&audio->read_lock); audevrc_out_flush(audio); mutex_unlock(&audio->read_lock); } static void audevrc_in_flush(struct audio_evrc_in *audio) { int i; audio->dsp_cnt = 0; audio->in_head = 0; audio->in_tail = 0; audio->in_count = 0; audio->eos_ack = 0; for (i = FRAME_NUM-1; i >= 0; i--) { audio->in[i].size = 0; audio->in[i].read = 0; } MM_DBG("in_bytes %d\n", atomic_read(&audio->in_bytes)); MM_DBG("in_samples %d\n", atomic_read(&audio->in_samples)); atomic_set(&audio->in_bytes, 0); atomic_set(&audio->in_samples, 0); } static void audevrc_out_flush(struct audio_evrc_in *audio) { int i; audio->out_head = 0; audio->out_tail = 0; audio->out_count = 0; for (i = OUT_FRAME_NUM-1; i >= 0; i--) { audio->out[i].size = 0; audio->out[i].read = 0; audio->out[i].used = 0; } } /* ------------------- device --------------------- */ static long audevrc_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct audio_evrc_in *audio = file->private_data; int rc = 0; MM_DBG("\n"); if (cmd == AUDIO_GET_STATS) { struct msm_audio_stats stats; stats.byte_count = atomic_read(&audio->in_bytes); stats.sample_count = atomic_read(&audio->in_samples); if (copy_to_user((void *) arg, &stats, sizeof(stats))) return -EFAULT; return rc; } mutex_lock(&audio->lock); switch (cmd) { case AUDIO_START: { rc = audevrc_in_enable(audio); if (!rc) { rc = wait_event_interruptible_timeout(audio->wait_enable, 
audio->running != 0, 1*HZ); MM_DBG("state %d rc = %d\n", audio->running, rc); if (audio->running == 0) rc = -ENODEV; else rc = 0; } audio->stopped = 0; break; } case AUDIO_STOP: { rc = audevrc_in_disable(audio); audio->stopped = 1; break; } case AUDIO_FLUSH: { MM_DBG("AUDIO_FLUSH\n"); audio->rflush = 1; audio->wflush = 1; audevrc_ioport_reset(audio); if (audio->running) { audevrc_flush_command(audio); rc = wait_event_interruptible(audio->write_wait, !audio->wflush); if (rc < 0) { MM_ERR("AUDIO_FLUSH interrupted\n"); rc = -EINTR; } } else { audio->rflush = 0; audio->wflush = 0; } break; } case AUDIO_GET_CONFIG: { struct msm_audio_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = OUT_BUFFER_SIZE; cfg.buffer_count = OUT_FRAME_NUM; cfg.sample_rate = convert_samp_index(audio->samp_rate); cfg.channel_count = 1; cfg.type = 0; cfg.unused[0] = 0; cfg.unused[1] = 0; cfg.unused[2] = 0; if (copy_to_user((void *) arg, &cfg, sizeof(cfg))) rc = -EFAULT; else rc = 0; break; } case AUDIO_GET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; memset(&cfg, 0, sizeof(cfg)); cfg.buffer_size = audio->buffer_size; cfg.buffer_count = FRAME_NUM; if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) rc = -EFAULT; else rc = 0; break; } case AUDIO_SET_STREAM_CONFIG: { struct msm_audio_stream_config cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } /* Allow only single frame */ if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) { if (cfg.buffer_size != (FRAME_SIZE - 8)) rc = -EINVAL; break; } else { if (cfg.buffer_size != (EVRC_FRAME_SIZE + 14)) rc = -EINVAL; break; } audio->buffer_size = cfg.buffer_size; break; } case AUDIO_GET_EVRC_ENC_CONFIG: { if (copy_to_user((void *) arg, &audio->cfg, sizeof(audio->cfg))) rc = -EFAULT; break; } case AUDIO_SET_EVRC_ENC_CONFIG: { struct msm_audio_evrc_enc_config cfg; if (copy_from_user(&cfg, (void *) arg, sizeof(cfg))) { rc = -EFAULT; break; } MM_DBG("0X%8x, 0x%8x, 0x%8x\n", cfg.min_bit_rate, cfg.max_bit_rate, 
cfg.cdma_rate); if (cfg.min_bit_rate > CDMA_RATE_FULL || \ cfg.min_bit_rate < CDMA_RATE_EIGHTH) { MM_ERR("invalid min bitrate\n"); rc = -EFAULT; break; } if (cfg.max_bit_rate > CDMA_RATE_FULL || \ cfg.max_bit_rate < CDMA_RATE_EIGHTH) { MM_ERR("invalid max bitrate\n"); rc = -EFAULT; break; } /* Recording Does not support Erase and Blank */ if (cfg.cdma_rate > CDMA_RATE_FULL || cfg.cdma_rate < CDMA_RATE_EIGHTH) { MM_ERR("invalid qcelp cdma rate\n"); rc = -EFAULT; break; } memcpy(&audio->cfg, &cfg, sizeof(cfg)); break; } default: rc = -EINVAL; } mutex_unlock(&audio->lock); return rc; } static ssize_t audevrc_in_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct audio_evrc_in *audio = file->private_data; unsigned long flags; const char __user *start = buf; void *data; uint32_t index; uint32_t size; int rc = 0; struct evrc_encoded_meta_out meta_field; struct audio_frame_nt *nt_frame; MM_DBG("count = %d\n", count); mutex_lock(&audio->read_lock); while (count > 0) { rc = wait_event_interruptible( audio->wait, (audio->in_count > 0) || audio->stopped || audio->rflush); if (rc < 0) break; if (audio->rflush) { rc = -EBUSY; break; } if (audio->stopped && !audio->in_count) { MM_DBG("Driver in stop state, No more buffer to read"); rc = 0;/* End of File */ break; } index = audio->in_tail; data = (uint8_t *) audio->in[index].data; size = audio->in[index].size; if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) { nt_frame = (struct audio_frame_nt *)(data - sizeof(struct audio_frame_nt)); memcpy((char *)&meta_field.time_stamp_dword_lsw, (char *)&nt_frame->time_stamp_dword_lsw, (sizeof(struct evrc_encoded_meta_out) - \ sizeof(uint16_t))); meta_field.metadata_len = sizeof(struct evrc_encoded_meta_out); if (copy_to_user((char *)start, (char *)&meta_field, sizeof(struct evrc_encoded_meta_out))) { rc = -EFAULT; break; } if (nt_frame->nflag_lsw & 0x0001) { MM_ERR("recieved EOS in read call\n"); audio->eos_ack = 1; } buf += sizeof(struct evrc_encoded_meta_out); count 
-= sizeof(struct evrc_encoded_meta_out);
		}

		if (count >= size) {
			/* order the reads on the buffer */
			dma_coherent_post_ops();
			if (copy_to_user(buf, data, size)) {
				rc = -EFAULT;
				break;
			}
			spin_lock_irqsave(&audio->dsp_lock, flags);
			if (index != audio->in_tail) {
				/* overrun -- data is
				 * invalid and we need to retry */
				spin_unlock_irqrestore(&audio->dsp_lock, flags);
				continue;
			}
			audio->in[index].size = 0;
			audio->in_tail = (audio->in_tail + 1) & (FRAME_NUM - 1);
			audio->in_count--;
			spin_unlock_irqrestore(&audio->dsp_lock, flags);
			count -= size;
			buf += size;
			if ((audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL)) {
				/* re-arm the DSP read pointer unless EOS seen */
				if (!audio->eos_ack) {
					MM_DBG("sending read ptr command \
							%d %d\n",
							audio->dsp_cnt,
							audio->in_tail);
					audevrc_in_dsp_read_buffer(audio,
							audio->dsp_cnt++);
				}
			}
		} else {
			/* userspace buffer too small for a whole frame */
			MM_ERR("short read\n");
			break;
		}
		break;
	}
	mutex_unlock(&audio->read_lock);

	if (buf > start)
		return buf - start;

	return rc;
}

/* Hand PCM write buffers to the DSP.  Called from write() (needed == 0)
 * and from the DSP buffer-consumed callback (needed == 1); runs under
 * dsp_lock.
 */
static void audrec_pcm_send_data(struct audio_evrc_in *audio, unsigned needed)
{
	struct buffer *frame;
	unsigned long flags;
	MM_DBG("\n");
	spin_lock_irqsave(&audio->dsp_lock, flags);
	if (!audio->running)
		goto done;

	if (needed && !audio->wflush) {
		/* We were called from the callback because the DSP
		 * requested more data.  Note that the DSP does want
		 * more data, and if a buffer was in-flight, mark it
		 * as available (since the DSP must now be done with
		 * it).
		 */
		audio->out_needed = 1;
		frame = audio->out + audio->out_tail;
		if (frame->used == 0xffffffff) {
			MM_DBG("frame %d free\n", audio->out_tail);
			frame->used = 0;
			audio->out_tail ^= 1;
			wake_up(&audio->write_wait);
		}
	}

	if (audio->out_needed) {
		/* If the DSP currently wants data and we have a
		 * buffer available, we will send it and reset
		 * the needed flag.  We'll mark the buffer as in-flight
		 * so that it won't be recycled until the next buffer
		 * is requested
		 */
		frame = audio->out + audio->out_tail;
		if (frame->used) {
			BUG_ON(frame->used == 0xffffffff);
			audrec_pcm_buffer_ptr_refresh(audio,
						 audio->out_tail,
						 frame->used);
			frame->used = 0xffffffff;
			audio->out_needed = 0;
		}
	}
done:
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}

/* fsync(): NT mode only.  Waits on write_wait while a flush is pending.
 * NOTE(review): the condition waits FOR audio->wflush and then reports
 * -EBUSY when it is set — verify this matches the intended flush
 * handshake before changing it.
 */
static int audevrc_in_fsync(struct file *file, int datasync)
{
	struct audio_evrc_in *audio = file->private_data;
	int rc = 0;

	MM_DBG("\n"); /* Macro prints the file name and function */
	if (!audio->running || (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)) {
		rc = -EINVAL;
		goto done_nolock;
	}

	mutex_lock(&audio->write_lock);

	rc = wait_event_interruptible(audio->write_wait,
			audio->wflush);
	MM_DBG("waked on by some event audio->wflush = %d\n", audio->wflush);

	if (rc < 0)
		goto done;
	else if (audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
done:
	mutex_unlock(&audio->write_lock);
done_nolock:
	return rc;
}

/* Queue a meta-field-only buffer carrying the EOS flag once both write
 * buffers have drained.  Called from write() with write_lock held.
 */
int audrec_evrc_process_eos(struct audio_evrc_in *audio,
		const char __user *buf_start, unsigned short mfield_size)
{
	struct buffer *frame;
	int rc = 0;

	frame = audio->out + audio->out_head;

	rc = wait_event_interruptible(audio->write_wait,
		(audio->out_needed &&
		audio->out[0].used == 0 &&
		audio->out[1].used == 0)
		|| (audio->stopped)
		|| (audio->wflush));

	if (rc < 0)
		goto done;
	if (audio->stopped || audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
	if (copy_from_user(frame->data, buf_start, mfield_size)) {
		rc = -EFAULT;
		goto done;
	}

	frame->mfield_sz = mfield_size;
	audio->out_head ^= 1;
	frame->used = mfield_size;
	MM_DBG("copying meta_out frame->used = %d\n", frame->used);
	audrec_pcm_send_data(audio, 0);
done:
	return rc;
}

/* write(): NT mode only.  Accepts PCM data, optionally prefixed by a meta
 * field (when audio->mfield is set) whose EOS bit triggers the EOS path.
 */
static ssize_t audevrc_in_write(struct file *file,
				const char __user *buf,
				size_t count, loff_t *pos)
{
	struct audio_evrc_in *audio = file->private_data;
	const char __user *start = buf;
	struct buffer *frame;
	char *cpy_ptr;
	int rc = 0, eos_condition = AUDPREPROC_EVRC_EOS_NONE;
	unsigned short mfield_size = 0;
	int write_count = 0;

	MM_DBG("cnt=%d\n", count);

	/* samples are 16-bit; an odd byte count cannot be valid */
	if (count & 1)
		return -EINVAL;

	/* the write path only exists in non-tunnel (host-fed) mode */
	if (audio->mode != MSM_AUD_ENC_MODE_NONTUNNEL)
		return -EINVAL;

	mutex_lock(&audio->write_lock);
	frame = audio->out + audio->out_head;
	/* if supplied count is more than driver buffer size
	 * then only copy driver buffer size
	 */
	if (count > frame->size)
		count = frame->size;

	write_count = count;	/* report the truncated count to the caller */
	cpy_ptr = frame->data;
	rc = wait_event_interruptible(audio->write_wait,
				      (frame->used == 0)
					|| (audio->stopped)
					|| (audio->wflush));
	if (rc < 0)
		goto error;

	if (audio->stopped || audio->wflush) {
		rc = -EBUSY;
		goto error;
	}
	if (audio->mfield) {
		if (buf == start) {
			/* Processing beginning of user buffer */
			if (__get_user(mfield_size,
				(unsigned short __user *) buf)) {
				rc = -EFAULT;
				goto error;
			} else if (mfield_size > count) {
				rc = -EINVAL;
				goto error;
			}
			MM_DBG("mf offset_val %x\n", mfield_size);
			if (copy_from_user(cpy_ptr, buf, mfield_size)) {
				rc = -EFAULT;
				goto error;
			}
			/* Check if EOS flag is set and buffer has
			 * contains just meta field
			 */
			if (cpy_ptr[AUDPREPROC_EVRC_EOS_FLG_OFFSET] &
					AUDPREPROC_EVRC_EOS_FLG_MASK) {
				eos_condition = AUDPREPROC_EVRC_EOS_SET;
				MM_DBG("EOS SET\n");
				if (mfield_size == count) {
					/* meta-only buffer: handle EOS via
					 * audrec_evrc_process_eos() below */
					buf += mfield_size;
					eos_condition = 0;
					goto exit;
				} else
					/* data follows: clear the flag in the
					 * copied meta field */
					cpy_ptr[AUDPREPROC_EVRC_EOS_FLG_OFFSET]
						&= ~AUDPREPROC_EVRC_EOS_FLG_MASK;
			}
			cpy_ptr += mfield_size;
			count -= mfield_size;
			buf += mfield_size;
		} else {
			mfield_size = 0;
			MM_DBG("continuous buffer\n");
		}
		frame->mfield_sz = mfield_size;
	}
	MM_DBG("copying the stream count = %d\n", count);
	if (copy_from_user(cpy_ptr, buf, count)) {
		rc = -EFAULT;
		goto error;
	}
exit:
	frame->used = count;
	audio->out_head ^= 1;	/* ping-pong between the two out frames */
	if (!audio->flush_ack)
		audrec_pcm_send_data(audio, 0);
	else {
		/* a flush completed: tell send_data the DSP wants data */
		audrec_pcm_send_data(audio, 1);
		audio->flush_ack = 0;
	}
	if (eos_condition == AUDPREPROC_EVRC_EOS_SET)
		rc = audrec_evrc_process_eos(audio, start, mfield_size);
	mutex_unlock(&audio->write_lock);
	return write_count;
error:
	mutex_unlock(&audio->write_lock);
	return rc;
}

static int
audevrc_in_release(struct inode *inode, struct file *file) { struct audio_evrc_in *audio = file->private_data; mutex_lock(&audio->lock); audevrc_in_disable(audio); audevrc_in_flush(audio); msm_adsp_put(audio->audrec); if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) msm_adsp_put(audio->audpre); audpreproc_aenc_free(audio->enc_id); audio->audrec = NULL; audio->audpre = NULL; audio->opened = 0; if ((audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) && \ (audio->out_data)) { ion_unmap_kernel(audio->client, audio->input_buff_handle); ion_free(audio->client, audio->input_buff_handle); audio->out_data = NULL; } if (audio->data) { ion_unmap_kernel(audio->client, audio->output_buff_handle); ion_free(audio->client, audio->output_buff_handle); audio->data = NULL; } ion_client_destroy(audio->client); mutex_unlock(&audio->lock); return 0; } static struct audio_evrc_in the_audio_evrc_in; static int audevrc_in_open(struct inode *inode, struct file *file) { struct audio_evrc_in *audio = &the_audio_evrc_in; int rc; int encid; int dma_size = 0; int len = 0; unsigned long ionflag = 0; ion_phys_addr_t addr = 0; struct ion_handle *handle = NULL; struct ion_client *client = NULL; mutex_lock(&audio->lock); if (audio->opened) { rc = -EBUSY; goto done; } if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { audio->mode = MSM_AUD_ENC_MODE_NONTUNNEL; dma_size = NT_DMASZ; MM_DBG("Opened for non tunnel mode encoding\n"); } else if (!(file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { audio->mode = MSM_AUD_ENC_MODE_TUNNEL; dma_size = DMASZ; MM_DBG("Opened for tunnel mode encoding\n"); } else { MM_ERR("Invalid mode\n"); rc = -EACCES; goto done; } /* Settings will be re-config at AUDIO_SET_CONFIG, * but at least we need to have initial config */ audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_8000, audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_8000; audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO; if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) audio->buffer_size = (EVRC_FRAME_SIZE 
				+ 14);	/* frame + NT meta-field header */
	else
		audio->buffer_size = EVRC_FRAME_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_EVRC | audio->mode;
	/* EVRC rate bounds: pinned to full rate until AUDIO_SET_CONFIG */
	audio->cfg.cdma_rate = CDMA_RATE_FULL;
	audio->cfg.min_bit_rate = CDMA_RATE_FULL;
	audio->cfg.max_bit_rate = CDMA_RATE_FULL;
	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
		rc = audmgr_open(&audio->audmgr);
		if (rc)
			goto done;
	}
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_evrc_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	/* tunnel mode additionally needs the pre-processing task */
	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL) {
		rc = msm_adsp_get("AUDPREPROCTASK", &audio->audpre,
				&audpre_evrc_adsp_ops, audio);
		if (rc) {
			msm_adsp_put(audio->audrec);
			audpreproc_aenc_free(audio->enc_id);
			goto done;
		}
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;
	audio->wflush = 0;
	audio->rflush = 0;
	audio->flush_ack = 0;

	audevrc_in_flush(audio);
	audevrc_out_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_EVRC_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", dma_size);
	handle = ion_alloc(client, dma_size, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}
	audio->output_buff_handle = handle;

	rc = ion_phys(client , handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto
output_buff_get_flags_error; } audio->map_v_read = ion_map_kernel(client, handle, ionflag); if (IS_ERR(audio->map_v_read)) { MM_ERR("could not map read buffers,freeing instance 0x%08x\n", (int)audio); rc = -ENOMEM; goto output_buff_map_error; } audio->data = audio->map_v_read; MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n", audio->phys, (int)audio->data); audio->out_data = NULL; if (audio->mode == MSM_AUD_ENC_MODE_NONTUNNEL) { MM_DBG("allocating BUFFER_SIZE %d\n", BUFFER_SIZE); handle = ion_alloc(client, BUFFER_SIZE, SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID)); if (IS_ERR_OR_NULL(handle)) { MM_ERR("Unable to create allocate I/P buffers\n"); rc = -ENOMEM; goto input_buff_alloc_error; } audio->input_buff_handle = handle; rc = ion_phys(client , handle, &addr, &len); if (rc) { MM_ERR("I/P buffers:Invalid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); rc = -ENOMEM; goto input_buff_alloc_error; } else { MM_INFO("Got valid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); } audio->out_phys = (int32_t)addr; rc = ion_handle_get_flags(client, handle, &ionflag); if (rc) { MM_ERR("could not get flags for the handle\n"); rc = -ENOMEM; goto input_buff_alloc_error; } audio->map_v_write = ion_map_kernel(client, handle, ionflag); if (IS_ERR(audio->map_v_write)) { MM_ERR("could not map write buffers\n"); rc = -ENOMEM; goto input_buff_map_error; } audio->out_data = audio->map_v_write; MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n", (unsigned int)addr, (unsigned int)audio->out_data); /* Initialize buffer */ audio->out[0].data = audio->out_data + 0; audio->out[0].addr = audio->out_phys + 0; audio->out[0].size = OUT_BUFFER_SIZE; audio->out[1].data = audio->out_data + OUT_BUFFER_SIZE; audio->out[1].addr = audio->out_phys + OUT_BUFFER_SIZE; audio->out[1].size = OUT_BUFFER_SIZE; MM_DBG("audio->out[0].data = %d audio->out[1].data = %d", (unsigned int)audio->out[0].data, (unsigned int)audio->out[1].data); audio->mfield = NT_FRAME_HEADER_SIZE; 
		audio->out_frame_cnt++;
	}
	file->private_data = audio;
	audio->opened = 1;
done:
	mutex_unlock(&audio->lock);
	return rc;
/* Error unwind: each label below releases one resource and falls
 * through to the next, undoing the open() sequence in reverse order. */
input_buff_map_error:
	ion_free(client, audio->input_buff_handle);
input_buff_alloc_error:
	ion_unmap_kernel(client, audio->output_buff_handle);
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	if (audio->mode == MSM_AUD_ENC_MODE_TUNNEL)
		msm_adsp_put(audio->audpre);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}

static const struct file_operations audio_evrc_in_fops = {
	.owner		= THIS_MODULE,
	.open		= audevrc_in_open,
	.release	= audevrc_in_release,
	.read		= audevrc_in_read,
	.write		= audevrc_in_write,
	.fsync		= audevrc_in_fsync,
	.unlocked_ioctl	= audevrc_in_ioctl,
};

/* /dev/msm_evrc_in: user-space entry point for the EVRC encoder */
static struct miscdevice audevrc_in_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "msm_evrc_in",
	.fops	= &audio_evrc_in_fops,
};

/* One-time init of the single device instance's locks and wait queues. */
static int __init audevrc_in_init(void)
{
	mutex_init(&the_audio_evrc_in.lock);
	mutex_init(&the_audio_evrc_in.read_lock);
	spin_lock_init(&the_audio_evrc_in.dsp_lock);
	init_waitqueue_head(&the_audio_evrc_in.wait);
	init_waitqueue_head(&the_audio_evrc_in.wait_enable);
	mutex_init(&the_audio_evrc_in.write_lock);
	init_waitqueue_head(&the_audio_evrc_in.write_wait);
	return misc_register(&audevrc_in_misc);
}

device_initcall(audevrc_in_init);
gpl-2.0
MarvinCorro/linux-cmps107
drivers/crypto/qat/qat_common/adf_admin.c
87
13077
/* This file is provided under a dual BSD/GPLv2 license. When using or redistributing this file, you may do so under either license. GPL LICENSE SUMMARY Copyright(c) 2014 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Contact Information: qat-linux@intel.com BSD LICENSE Copyright(c) 2014 Intel Corporation. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "icp_qat_fw_init_admin.h" /* Admin Messages Registers */ #define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574) #define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578) #define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970 #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000 #define ADF_ADMINMSG_LEN 32 static const u8 const_tab[1024] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab, 0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e, 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39, 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe, 0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17, 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f, 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; struct adf_admin_comms { dma_addr_t phy_addr; dma_addr_t const_tbl_addr; void *virt_addr; void __iomem *mailbox_addr; struct mutex lock; /* protects adf_admin_comms struct */ }; static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae, void *in, void *out) { struct adf_admin_comms *admin = accel_dev->admin; int offset = ae * ADF_ADMINMSG_LEN * 2; void __iomem *mailbox = admin->mailbox_addr; int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE; int times, received; mutex_lock(&admin->lock); if (ADF_CSR_RD(mailbox, mb_offset) == 1) { mutex_unlock(&admin->lock); return -EAGAIN; } memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN); ADF_CSR_WR(mailbox, mb_offset, 1); received = 0; for (times = 0; times < 50; times++) { msleep(20); if (ADF_CSR_RD(mailbox, 
			mb_offset) == 0) {
			/* firmware cleared the mailbox: response is ready */
			received = 1;
			break;
		}
	}
	if (received)
		memcpy(out, admin->virt_addr + offset +
		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
	else
		dev_err(&GET_DEV(accel_dev),
			"Failed to send admin msg to accelerator\n");

	mutex_unlock(&admin->lock);
	return received ? 0 : -EFAULT;
}

/*
 * Broadcast one admin command to every Acceleration Engine and check
 * each response header; fails fast on the first engine that errors.
 */
static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	int i;

	memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
	req.init_admin_cmd_id = cmd;

	/* the constants-config command carries the DMA'd const table */
	if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
		req.init_cfg_sz = 1024;
		req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
	}
	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
		memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
		if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
		    resp.init_resp_hdr.status)
			return -EFAULT;
	}
	return 0;
}

/**
 * adf_send_admin_init() - Function sends init message to FW
 * @accel_dev: Pointer to acceleration device.
 *
 * Function sends admin init message to the FW
 *
 * Return: 0 on success, error code otherwise.
*/ int adf_send_admin_init(struct adf_accel_dev *accel_dev) { int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME); if (ret) return ret; return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG); } EXPORT_SYMBOL_GPL(adf_send_admin_init); int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; void __iomem *csr = pmisc->virt_addr; void __iomem *mailbox = (void __iomem *)((uintptr_t)csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET); u64 reg_val; admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!admin) return -ENOMEM; admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, &admin->phy_addr, GFP_KERNEL); if (!admin->virt_addr) { dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); kfree(admin); return -ENOMEM; } admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev), (void *) const_tab, 1024, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&GET_DEV(accel_dev), admin->const_tbl_addr))) { dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, admin->virt_addr, admin->phy_addr); kfree(admin); return -ENOMEM; } reg_val = (u64)admin->phy_addr; ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); mutex_init(&admin->lock); admin->mailbox_addr = mailbox; accel_dev->admin = admin; return 0; } EXPORT_SYMBOL_GPL(adf_init_admin_comms); void adf_exit_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin = accel_dev->admin; if (!admin) return; if (admin->virt_addr) dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, admin->virt_addr, admin->phy_addr); dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024, DMA_TO_DEVICE); mutex_destroy(&admin->lock); kfree(admin); accel_dev->admin = NULL; } EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
gpl-2.0
futranbg/ef65l-kernel-2.0
arch/arm/mach-msm/board-msm7x27.c
87
54777
/* * Copyright (C) 2007 Google, Inc. * Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/bootmem.h> #include <linux/power_supply.h> #include <mach/msm_memtypes.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/flash.h> #include <asm/setup.h> #ifdef CONFIG_CACHE_L2X0 #include <asm/hardware/cache-l2x0.h> #endif #include <asm/mach/mmc.h> #include <mach/vreg.h> #include <mach/mpp.h> #include <mach/board.h> #include <mach/pmic.h> #include <mach/msm_iomap.h> #include <mach/msm_rpcrouter.h> #include <mach/msm_hsusb.h> #include <mach/rpc_hsusb.h> #include <mach/rpc_pmapp.h> #include <mach/msm_serial_hs.h> #include <mach/memory.h> #include <mach/msm_battery.h> #include <mach/rpc_server_handset.h> #include <mach/msm_tsif.h> #include <mach/socinfo.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/i2c.h> #include <linux/android_pmem.h> #include <mach/camera.h> #ifdef CONFIG_USB_G_ANDROID #include <linux/usb/android.h> #include <mach/usbdiag.h> #endif #include "board-msm7627-regulator.h" #include "devices.h" #include "clock.h" #include "acpuclock.h" #include "msm-keypad-devices.h" #include "pm.h" #include "pm-boot.h" #ifdef CONFIG_ARCH_MSM7X25 #define MSM_PMEM_MDP_SIZE 
0xb21000
#define MSM_PMEM_ADSP_SIZE	0x97b000
#define MSM_PMEM_AUDIO_SIZE	0x121000
#define MSM_FB_SIZE		0x200000
#define PMEM_KERNEL_EBI1_SIZE	0x64000
#endif

#ifdef CONFIG_ARCH_MSM7X27
/* 7x27 carves out larger PMEM regions than 7x25 */
#define MSM_PMEM_MDP_SIZE	0x1B76000
#define MSM_PMEM_ADSP_SIZE	0xC8A000
#define MSM_PMEM_AUDIO_SIZE	0x5B000
#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
#define MSM_FB_SIZE		0x233000
#else
#define MSM_FB_SIZE		0x177000
#endif
#define PMEM_KERNEL_EBI1_SIZE	0x1C000
#endif

/* SMC91x Ethernet controller: MMIO window + GPIO-routed interrupt */
static struct resource smc91x_resources[] = {
	[0] = {
		.start	= 0x9C004300,
		.end	= 0x9C0043ff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= MSM_GPIO_TO_INT(132),
		.end	= MSM_GPIO_TO_INT(132),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
};

#ifdef CONFIG_USB_G_ANDROID
static struct android_usb_platform_data android_usb_pdata = {
	.update_pid_and_serial_num = usb_diag_update_pid_and_serial_num,
};

static struct platform_device android_usb_device = {
	.name	= "android_usb",
	.id	= -1,
	.dev	= {
		.platform_data = &android_usb_pdata,
	},
};
#endif

#ifdef CONFIG_USB_EHCI_MSM_72K
/* Toggle VBUS supply for USB host mode via the PMIC RPC helpers. */
static void msm_hsusb_vbus_power(unsigned phy_info, int on)
{
	if (on)
		msm_hsusb_vbus_powerup();
	else
		msm_hsusb_vbus_shutdown();
}

static struct msm_usb_host_platform_data msm_usb_host_pdata = {
	.phy_info	= (USB_PHY_INTEGRATED | USB_PHY_MODEL_65NM),
};

/* Register the USB host controller; FFA boards have no host port. */
static void __init msm7x2x_init_host(void)
{
	if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa())
		return;
	msm_add_host(0, &msm_usb_host_pdata);
}
#endif

#ifdef CONFIG_USB_MSM_OTG_72K
/* Bring the HS-USB RPC link up or down on behalf of the OTG driver. */
static int hsusb_rpc_connect(int connect)
{
	if (connect)
		return msm_hsusb_rpc_connect();
	else
		return msm_hsusb_rpc_close();
}
#endif

#ifdef CONFIG_USB_MSM_OTG_72K
/* Configure the 3.3V USB PHY regulator (see the VDDA33 note below). */
static int msm_hsusb_ldo_init(int init)
{
	static struct regulator *reg_hsusb;
	int rc;

	if (init) {
		reg_hsusb = regulator_get(NULL, "usb");
		if (IS_ERR(reg_hsusb)) {
			rc = PTR_ERR(reg_hsusb);
			pr_err("%s: could not get regulator: %d\n",
			       __func__,
			       rc);
			goto out;
		}

		rc = regulator_set_voltage(reg_hsusb, 3300000, 3300000);
		if (rc < 0) {
			pr_err("%s: could not set voltage: %d\n",
			       __func__, rc);
			goto usb_reg_fail;
		}

		rc = regulator_enable(reg_hsusb);
		if (rc < 0) {
			pr_err("%s: could not enable regulator: %d\n",
			       __func__, rc);
			goto usb_reg_fail;
		}

		/*
		 * PHY 3.3V analog domain(VDDA33) is powered up by
		 * an always enabled power supply (LP5900TL-3.3).
		 * USB VREG default source is VBUS line. Turning
		 * on USB VREG has a side effect on the USB suspend
		 * current. Hence USB VREG is explicitly turned
		 * off here.
		 */
		rc = regulator_disable(reg_hsusb);
		if (rc < 0) {
			pr_err("%s: could not disable regulator: %d\n",
			       __func__, rc);
			goto usb_reg_fail;
		}

		regulator_put(reg_hsusb);
	}
	return 0;

usb_reg_fail:
	regulator_put(reg_hsusb);
out:
	return rc;
}

/* Register/unregister the PMIC VBUS-notification callback over RPC. */
static int msm_hsusb_pmic_notif_init(void (*callback)(int online), int init)
{
	int ret;

	if (init) {
		ret = msm_pm_app_rpc_init(callback);
	} else {
		msm_pm_app_rpc_deinit(callback);
		ret = 0;
	}
	return ret;
}

/* PHY reset is delegated to the modem via RPC; 'regs' is unused here. */
static int msm_otg_rpc_phy_reset(void __iomem *regs)
{
	return msm_hsusb_phy_reset();
}

static struct msm_otg_platform_data msm_otg_pdata = {
	.rpc_connect		= hsusb_rpc_connect,
	.pmic_vbus_notif_init	= msm_hsusb_pmic_notif_init,
	.chg_vbus_draw		= hsusb_chg_vbus_draw,
	.chg_connected		= hsusb_chg_connected,
	.chg_init		= hsusb_chg_init,
#ifdef CONFIG_USB_EHCI_MSM_72K
	.vbus_power		= msm_hsusb_vbus_power,
#endif
	.ldo_init		= msm_hsusb_ldo_init,
	.pclk_required_during_lpm = 1,
};

#ifdef CONFIG_USB_GADGET
static struct msm_hsusb_gadget_platform_data msm_gadget_pdata;
#endif
#endif

/* Audio routing endpoints; the id values are fixed DSP device ids. */
#define SND(desc, num) { .name = #desc, .id = num }
static struct snd_endpoint snd_endpoints_list[] = {
	SND(HANDSET, 0),
	SND(MONO_HEADSET, 2),
	SND(HEADSET, 3),
	SND(SPEAKER, 6),
	SND(TTY_HEADSET, 8),
	SND(TTY_VCO, 9),
	SND(TTY_HCO, 10),
	SND(BT, 12),
	SND(IN_S_SADC_OUT_HANDSET, 16),
	SND(IN_S_SADC_OUT_SPEAKER_PHONE, 25),
	SND(CURRENT, 27),
};
#undef SND

static struct msm_snd_endpoints msm_device_snd_endpoints = {
	.endpoints =
snd_endpoints_list, .num = sizeof(snd_endpoints_list) / sizeof(struct snd_endpoint) }; static struct platform_device msm_device_snd = { .name = "msm_snd", .id = -1, .dev = { .platform_data = &msm_device_snd_endpoints }, }; #define DEC0_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #ifdef CONFIG_ARCH_MSM7X25 #define DEC1_FORMAT ((1<<MSM_ADSP_CODEC_WAV)|(1<<MSM_ADSP_CODEC_ADPCM)| \ (1<<MSM_ADSP_CODEC_YADPCM)|(1<<MSM_ADSP_CODEC_QCELP)| \ (1<<MSM_ADSP_CODEC_MP3)) #define DEC2_FORMAT ((1<<MSM_ADSP_CODEC_WAV)|(1<<MSM_ADSP_CODEC_ADPCM)| \ (1<<MSM_ADSP_CODEC_YADPCM)|(1<<MSM_ADSP_CODEC_QCELP)| \ (1<<MSM_ADSP_CODEC_MP3)) #define DEC3_FORMAT 0 #define DEC4_FORMAT 0 #else #define DEC1_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC2_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC3_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC4_FORMAT (1<<MSM_ADSP_CODEC_MIDI) #endif static unsigned int dec_concurrency_table[] = { /* Audio LP */ 
(DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DMA)), 0, 0, 0, 0, /* Concurrency 1 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 2 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 3 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 4 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 5 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 6 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), 0, 0, 0, 0, /* Concurrency 7 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), }; #define DEC_INFO(name, queueid, decid, nr_codec) { .module_name = name, \ .module_queueid = queueid, .module_decid = 
decid, \ .nr_codec_support = nr_codec} static struct msm_adspdec_info dec_info_list[] = { DEC_INFO("AUDPLAY0TASK", 13, 0, 11), /* AudPlay0BitStreamCtrlQueue */ #ifdef CONFIG_ARCH_MSM7X25 DEC_INFO("AUDPLAY1TASK", 14, 1, 5), /* AudPlay1BitStreamCtrlQueue */ DEC_INFO("AUDPLAY2TASK", 15, 2, 5), /* AudPlay2BitStreamCtrlQueue */ DEC_INFO("AUDPLAY3TASK", 16, 3, 0), /* AudPlay3BitStreamCtrlQueue */ DEC_INFO("AUDPLAY4TASK", 17, 4, 0), /* AudPlay4BitStreamCtrlQueue */ #else DEC_INFO("AUDPLAY1TASK", 14, 1, 11), /* AudPlay1BitStreamCtrlQueue */ DEC_INFO("AUDPLAY2TASK", 15, 2, 11), /* AudPlay2BitStreamCtrlQueue */ DEC_INFO("AUDPLAY3TASK", 16, 3, 11), /* AudPlay3BitStreamCtrlQueue */ DEC_INFO("AUDPLAY4TASK", 17, 4, 1), /* AudPlay4BitStreamCtrlQueue */ #endif }; static struct msm_adspdec_database msm_device_adspdec_database = { .num_dec = ARRAY_SIZE(dec_info_list), .num_concurrency_support = (ARRAY_SIZE(dec_concurrency_table) / \ ARRAY_SIZE(dec_info_list)), .dec_concurrency_table = dec_concurrency_table, .dec_info_list = dec_info_list, }; static struct platform_device msm_device_adspdec = { .name = "msm_adspdec", .id = -1, .dev = { .platform_data = &msm_device_adspdec_database }, }; static struct android_pmem_platform_data android_pmem_pdata = { .name = "pmem", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 1, .memory_type = MEMTYPE_EBI1, }; static struct android_pmem_platform_data android_pmem_adsp_pdata = { .name = "pmem_adsp", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 0, .memory_type = MEMTYPE_EBI1, }; static struct android_pmem_platform_data android_pmem_audio_pdata = { .name = "pmem_audio", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 0, .memory_type = MEMTYPE_EBI1, }; static struct platform_device android_pmem_device = { .name = "android_pmem", .id = 0, .dev = { .platform_data = &android_pmem_pdata }, }; static struct platform_device android_pmem_adsp_device = { .name = "android_pmem", .id = 1, .dev = { .platform_data = 
&android_pmem_adsp_pdata }, }; static struct platform_device android_pmem_audio_device = { .name = "android_pmem", .id = 2, .dev = { .platform_data = &android_pmem_audio_pdata }, }; static struct msm_handset_platform_data hs_platform_data = { .hs_name = "7k_handset", .pwr_key_delay_ms = 500, /* 0 will disable end key */ }; static struct platform_device hs_device = { .name = "msm-handset", .id = -1, .dev = { .platform_data = &hs_platform_data, }, }; /* TSIF begin */ #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) #define TSIF_B_SYNC GPIO_CFG(87, 5, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) #define TSIF_B_DATA GPIO_CFG(86, 3, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) #define TSIF_B_EN GPIO_CFG(85, 3, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) #define TSIF_B_CLK GPIO_CFG(84, 4, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA) static const struct msm_gpio tsif_gpios[] = { { .gpio_cfg = TSIF_B_CLK, .label = "tsif_clk", }, { .gpio_cfg = TSIF_B_EN, .label = "tsif_en", }, { .gpio_cfg = TSIF_B_DATA, .label = "tsif_data", }, { .gpio_cfg = TSIF_B_SYNC, .label = "tsif_sync", }, }; static struct msm_tsif_platform_data tsif_platform_data = { .num_gpios = ARRAY_SIZE(tsif_gpios), .gpios = tsif_gpios, .tsif_clk = "core_clk", .tsif_pclk = "iface_clk", .tsif_ref_clk = "ref_clk", }; #endif /* defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) */ /* TSIF end */ #define LCDC_CONFIG_PROC 21 #define LCDC_UN_CONFIG_PROC 22 #define LCDC_API_PROG 0x30000066 #define LCDC_API_VERS 0x00010001 #define GPIO_OUT_132 132 #define GPIO_OUT_131 131 #define GPIO_OUT_103 103 #define GPIO_OUT_102 102 #define GPIO_OUT_88 88 static struct msm_rpc_endpoint *lcdc_ep; static int msm_fb_lcdc_config(int on) { int rc = 0; struct rpc_request_hdr hdr; if (on) pr_info("lcdc config\n"); else pr_info("lcdc un-config\n"); lcdc_ep = msm_rpc_connect_compatible(LCDC_API_PROG, LCDC_API_VERS, 0); if (IS_ERR(lcdc_ep)) { printk(KERN_ERR "%s: msm_rpc_connect failed! 
rc = %ld\n", __func__, PTR_ERR(lcdc_ep)); return -EINVAL; } rc = msm_rpc_call(lcdc_ep, (on) ? LCDC_CONFIG_PROC : LCDC_UN_CONFIG_PROC, &hdr, sizeof(hdr), 5 * HZ); if (rc) printk(KERN_ERR "%s: msm_rpc_call failed! rc = %d\n", __func__, rc); msm_rpc_close(lcdc_ep); return rc; } static int gpio_array_num[] = { GPIO_OUT_132, /* spi_clk */ GPIO_OUT_131, /* spi_cs */ GPIO_OUT_103, /* spi_sdi */ GPIO_OUT_102, /* spi_sdoi */ GPIO_OUT_88 }; static void lcdc_gordon_gpio_init(void) { if (gpio_request(GPIO_OUT_132, "spi_clk")) pr_err("failed to request gpio spi_clk\n"); if (gpio_request(GPIO_OUT_131, "spi_cs")) pr_err("failed to request gpio spi_cs\n"); if (gpio_request(GPIO_OUT_103, "spi_sdi")) pr_err("failed to request gpio spi_sdi\n"); if (gpio_request(GPIO_OUT_102, "spi_sdoi")) pr_err("failed to request gpio spi_sdoi\n"); if (gpio_request(GPIO_OUT_88, "gpio_dac")) pr_err("failed to request gpio_dac\n"); } static uint32_t lcdc_gpio_table[] = { GPIO_CFG(GPIO_OUT_132, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_OUT_131, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_OUT_103, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_OUT_102, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_OUT_88, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; static void config_lcdc_gpio_table(uint32_t *table, int len, unsigned enable) { int n, rc; for (n = 0; n < len; n++) { rc = gpio_tlmm_config(table[n], enable ? 
GPIO_CFG_ENABLE : GPIO_CFG_DISABLE); if (rc) { printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n", __func__, table[n], rc); break; } } } static void lcdc_gordon_config_gpios(int enable) { config_lcdc_gpio_table(lcdc_gpio_table, ARRAY_SIZE(lcdc_gpio_table), enable); } static char *msm_fb_lcdc_vreg[] = { "gp5" }; static int msm_fb_lcdc_power_save(int on) { int i, rc = 0; static struct regulator *vreg[ARRAY_SIZE(msm_fb_lcdc_vreg)]; if (on) { for (i = 0; i < ARRAY_SIZE(msm_fb_lcdc_vreg); i++) { vreg[i] = regulator_get(NULL, msm_fb_lcdc_vreg[i]); if (IS_ERR(vreg[i])) { rc = PTR_ERR(vreg[i]); pr_err("%s: could get not regulator: %d\n", __func__, rc); goto reg_get_fail; } rc = regulator_set_voltage(vreg[i], 2850000, 3000000); if (rc < 0) { pr_err("%s: could not set voltage: %d\n", __func__, rc); goto reg_get_fail; } } } for (i = 0; i < ARRAY_SIZE(msm_fb_lcdc_vreg); i++) { if (on) { rc = regulator_enable(vreg[i]); if (rc) { pr_err("%s: could not enable regulator %s:" "%d\n", __func__, msm_fb_lcdc_vreg[i], rc); goto vreg_lcdc_fail; } } else { rc = regulator_disable(vreg[i]); if (rc) { pr_err("%s: could not disable regulator %s:" "%d\n", __func__, msm_fb_lcdc_vreg[i], rc); regulator_put(vreg[i]); goto vreg_lcdc_fail; } regulator_put(vreg[i]); rc = gpio_tlmm_config(GPIO_CFG(GPIO_OUT_88, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_ENABLE); if (rc) printk(KERN_ERR "gpio_tlmm_config failed\n"); gpio_set_value(88, 0); mdelay(15); gpio_set_value(88, 1); mdelay(15); } } return rc; reg_get_fail: for (; i > 0; i--) regulator_put(vreg[i - 1]); return rc; vreg_lcdc_fail: if (on) { for (; i > 0; i--) regulator_disable(vreg[i - 1]); } else { for (; i > 0; i--) regulator_enable(vreg[i - 1]); } return rc; } static struct lcdc_platform_data lcdc_pdata = { .lcdc_gpio_config = msm_fb_lcdc_config, .lcdc_power_save = msm_fb_lcdc_power_save, }; static struct msm_panel_common_pdata lcdc_gordon_panel_data = { .panel_config_gpio = lcdc_gordon_config_gpios, .gpio_num = 
gpio_array_num, }; static struct platform_device lcdc_gordon_panel_device = { .name = "lcdc_gordon_vga", .id = 0, .dev = { .platform_data = &lcdc_gordon_panel_data, } }; static struct resource msm_fb_resources[] = { { .flags = IORESOURCE_DMA, } }; static int msm_fb_detect_panel(const char *name) { int ret = -EPERM; if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa()) { if (!strcmp(name, "lcdc_gordon_vga")) ret = 0; else ret = -ENODEV; } return ret; } static struct msm_fb_platform_data msm_fb_pdata = { .detect_client = msm_fb_detect_panel, .mddi_prescan = 1, }; static struct platform_device msm_fb_device = { .name = "msm_fb", .id = 0, .num_resources = ARRAY_SIZE(msm_fb_resources), .resource = msm_fb_resources, .dev = { .platform_data = &msm_fb_pdata, } }; #ifdef CONFIG_BT static struct platform_device msm_bt_power_device = { .name = "bt_power", }; enum { BT_WAKE, BT_RFR, BT_CTS, BT_RX, BT_TX, BT_PCM_DOUT, BT_PCM_DIN, BT_PCM_SYNC, BT_PCM_CLK, BT_HOST_WAKE, }; static unsigned bt_config_power_on[] = { GPIO_CFG(42, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* WAKE */ GPIO_CFG(43, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* RFR */ GPIO_CFG(44, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* CTS */ GPIO_CFG(45, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* Rx */ GPIO_CFG(46, 3, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* Tx */ GPIO_CFG(68, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* PCM_DOUT */ GPIO_CFG(69, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* PCM_DIN */ GPIO_CFG(70, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* PCM_SYNC */ GPIO_CFG(71, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* PCM_CLK */ GPIO_CFG(83, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* HOST_WAKE */ }; static unsigned bt_config_power_off[] = { GPIO_CFG(42, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* WAKE */ GPIO_CFG(43, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* RFR */ 
GPIO_CFG(44, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),	/* CTS */
	GPIO_CFG(45, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),	/* Rx */
	GPIO_CFG(46, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),	/* Tx */
	GPIO_CFG(68, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),	/* PCM_DOUT */
	GPIO_CFG(69, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),	/* PCM_DIN */
	GPIO_CFG(70, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),	/* PCM_SYNC */
	GPIO_CFG(71, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),	/* PCM_CLK */
	GPIO_CFG(83, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA),	/* HOST_WAKE */
};

/*
 * bluetooth_power() - power control callback for the "bt_power" device.
 * @on: non-zero muxes the BT UART/PCM/wake pins into their functional
 *      modes and enables the "gp6" regulator at 2.6 V; zero disables
 *      and releases the regulator and parks the pins as pulled-down
 *      inputs.
 *
 * Returns 0 on success, -EIO on a pin-mux failure, or a negative errno
 * from the regulator calls.
 */
static int bluetooth_power(int on)
{
	int pin, rc;
	/* Regulator handle persists between the "on" and "off" calls. */
	static struct regulator *vreg_bt;

	printk(KERN_DEBUG "%s\n", __func__);

	/* do not have vreg bt defined, gp6 is the same */
	/* vreg_get parameter 1 (struct device *) is ignored */
	if (on) {
		/* Mux every BT pin into its functional mode. */
		for (pin = 0; pin < ARRAY_SIZE(bt_config_power_on); pin++) {
			rc = gpio_tlmm_config(bt_config_power_on[pin],
					      GPIO_CFG_ENABLE);
			if (rc) {
				printk(KERN_ERR
				       "%s: gpio_tlmm_config(%#x)=%d\n",
				       __func__, bt_config_power_on[pin], rc);
				return -EIO;
			}
		}
		vreg_bt = regulator_get(NULL, "gp6");
		if (IS_ERR(vreg_bt)) {
			rc = PTR_ERR(vreg_bt);
			pr_err("%s: could get not regulator: %d\n",
					__func__, rc);
			goto out;
		}

		/* units of mV, steps of 50 mV */
		rc = regulator_set_voltage(vreg_bt, 2600000, 2600000);
		if (rc < 0) {
			pr_err("%s: could not set voltage: %d\n",
					__func__, rc);
			goto bt_vreg_fail;
		}
		rc = regulator_enable(vreg_bt);
		if (rc < 0) {
			pr_err("%s: could not enable regulator: %d\n",
					__func__, rc);
			goto bt_vreg_fail;
		}
	} else {
		/*
		 * NOTE(review): assumes a prior successful "on" call;
		 * vreg_bt is NULL (or stale) otherwise and
		 * regulator_disable() would oops — confirm callers never
		 * power off first.
		 */
		rc = regulator_disable(vreg_bt);
		if (rc < 0) {
			pr_err("%s: could not disable regulator: %d\n",
					__func__, rc);
			goto bt_vreg_fail;
		}
		regulator_put(vreg_bt);
		/* Park all BT pins as pulled-down inputs. */
		for (pin = 0; pin < ARRAY_SIZE(bt_config_power_off); pin++) {
			rc = gpio_tlmm_config(bt_config_power_off[pin],
					      GPIO_CFG_ENABLE);
			if (rc) {
				printk(KERN_ERR
				       "%s: gpio_tlmm_config(%#x)=%d\n",
				       __func__, bt_config_power_off[pin], rc);
				return -EIO;
			}
		}
	}
return 0; bt_vreg_fail: regulator_put(vreg_bt); out: return rc; } static void __init bt_power_init(void) { msm_bt_power_device.dev.platform_data = &bluetooth_power; } #else #define bt_power_init(x) do {} while (0) #endif static struct platform_device msm_device_pmic_leds = { .name = "pmic-leds", .id = -1, }; static struct resource bluesleep_resources[] = { { .name = "gpio_host_wake", .start = 83, .end = 83, .flags = IORESOURCE_IO, }, { .name = "gpio_ext_wake", .start = 42, .end = 42, .flags = IORESOURCE_IO, }, { .name = "host_wake", .start = MSM_GPIO_TO_INT(83), .end = MSM_GPIO_TO_INT(83), .flags = IORESOURCE_IRQ, }, }; static struct platform_device msm_bluesleep_device = { .name = "bluesleep", .id = -1, .num_resources = ARRAY_SIZE(bluesleep_resources), .resource = bluesleep_resources, }; static struct i2c_board_info i2c_devices[] = { #ifdef CONFIG_MT9D112 { I2C_BOARD_INFO("mt9d112", 0x78 >> 1), }, #endif #ifdef CONFIG_S5K3E2FX { I2C_BOARD_INFO("s5k3e2fx", 0x20 >> 1), }, #endif #ifdef CONFIG_MT9P012 { I2C_BOARD_INFO("mt9p012", 0x6C >> 1), }, #endif #ifdef CONFIG_MT9P012_KM { I2C_BOARD_INFO("mt9p012_km", 0x6C >> 2), }, #endif #if defined(CONFIG_MT9T013) || defined(CONFIG_SENSORS_MT9T013) { I2C_BOARD_INFO("mt9t013", 0x6C), }, #endif #ifdef CONFIG_VB6801 { I2C_BOARD_INFO("vb6801", 0x20), }, #endif }; #ifdef CONFIG_MSM_CAMERA static uint32_t camera_off_gpio_table[] = { /* parallel CAMERA interfaces */ GPIO_CFG(0, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT0 */ GPIO_CFG(1, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT1 */ GPIO_CFG(2, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT2 */ GPIO_CFG(3, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT3 */ GPIO_CFG(4, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT4 */ GPIO_CFG(5, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT5 */ GPIO_CFG(6, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT6 */ GPIO_CFG(7, 0, GPIO_CFG_INPUT, 
GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT7 */ GPIO_CFG(8, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT8 */ GPIO_CFG(9, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT9 */ GPIO_CFG(10, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT10 */ GPIO_CFG(11, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT11 */ GPIO_CFG(12, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* PCLK */ GPIO_CFG(13, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* HSYNC_IN */ GPIO_CFG(14, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* VSYNC_IN */ GPIO_CFG(15, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* MCLK */ }; static uint32_t camera_on_gpio_table[] = { /* parallel CAMERA interfaces */ GPIO_CFG(0, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT0 */ GPIO_CFG(1, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT1 */ GPIO_CFG(2, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT2 */ GPIO_CFG(3, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT3 */ GPIO_CFG(4, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT4 */ GPIO_CFG(5, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT5 */ GPIO_CFG(6, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT6 */ GPIO_CFG(7, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT7 */ GPIO_CFG(8, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT8 */ GPIO_CFG(9, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT9 */ GPIO_CFG(10, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT10 */ GPIO_CFG(11, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* DAT11 */ GPIO_CFG(12, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_16MA), /* PCLK */ GPIO_CFG(13, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* HSYNC_IN */ GPIO_CFG(14, 1, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), /* VSYNC_IN */ GPIO_CFG(15, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_16MA), /* 
MCLK */ }; static void config_gpio_table(uint32_t *table, int len) { int n, rc; for (n = 0; n < len; n++) { rc = gpio_tlmm_config(table[n], GPIO_CFG_ENABLE); if (rc) { printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n", __func__, table[n], rc); break; } } } static void msm_camera_vreg_config(int vreg_en) { int rc; static struct regulator *vreg_gp2; static struct regulator *vreg_gp3; if (vreg_gp2 == NULL && vreg_gp3 == NULL) { vreg_gp2 = regulator_get(NULL, "gp2"); if (IS_ERR(vreg_gp2)) { rc = PTR_ERR(vreg_gp2); pr_err("%s: could not get regulator: %d\n", __func__, rc); return; } rc = regulator_set_voltage(vreg_gp2, 1800000, 1800000); if (rc < 0) { pr_err("%s: could not set voltage: %d\n", __func__, rc); goto cam_vreg_fail; } vreg_gp3 = regulator_get(NULL, "gp3"); if (IS_ERR(vreg_gp3)) { rc = PTR_ERR(vreg_gp3); pr_err("%s: could not get regulator: %d\n", __func__, rc); goto cam_vreg_fail; } rc = regulator_set_voltage(vreg_gp3, 2850000, 2850000); if (rc < 0) { pr_err("%s: could not set voltage: %d\n", __func__, rc); goto cam_vreg2_fail; } return; } if (vreg_gp2 == NULL || vreg_gp3 == NULL) { pr_err("Camera Regulators are not initialized\n"); return; } if (vreg_en) { rc = regulator_enable(vreg_gp2); if (rc) { pr_err("%s: could not enable regulator: %d\n", __func__, rc); goto cam_vreg2_fail; } rc = regulator_enable(vreg_gp3); if (rc) { pr_err("%s: could not enable regulator: %d\n", __func__, rc); goto vreg_gp3_fail; } } else { rc = regulator_disable(vreg_gp2); if (rc) { pr_err("%s: could not disable regulator: %d\n", __func__, rc); return; } rc = regulator_disable(vreg_gp3); if (rc) { pr_err("%s: could not disable regulator: %d\n", __func__, rc); goto cam_vreg2_fail; } } return; vreg_gp3_fail: if (vreg_en) regulator_disable(vreg_gp2); cam_vreg2_fail: regulator_put(vreg_gp3); cam_vreg_fail: regulator_put(vreg_gp2); vreg_gp3 = NULL; vreg_gp2 = NULL; } static int config_camera_on_gpios(void) { int vreg_en = 1; if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa()) 
msm_camera_vreg_config(vreg_en); config_gpio_table(camera_on_gpio_table, ARRAY_SIZE(camera_on_gpio_table)); return 0; } static void config_camera_off_gpios(void) { int vreg_en = 0; if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa()) msm_camera_vreg_config(vreg_en); config_gpio_table(camera_off_gpio_table, ARRAY_SIZE(camera_off_gpio_table)); } static struct msm_camera_device_platform_data msm_camera_device_data = { .camera_gpio_on = config_camera_on_gpios, .camera_gpio_off = config_camera_off_gpios, .ioext.mdcphy = MSM_MDC_PHYS, .ioext.mdcsz = MSM_MDC_SIZE, .ioext.appphy = MSM_CLK_CTL_PHYS, .ioext.appsz = MSM_CLK_CTL_SIZE, }; int pmic_set_flash_led_current(enum pmic8058_leds id, unsigned mA) { int rc; rc = pmic_flash_led_set_current(mA); return rc; } static struct msm_camera_sensor_flash_src msm_flash_src = { .flash_sr_type = MSM_CAMERA_FLASH_SRC_PMIC, ._fsrc.pmic_src.num_of_src = 1, ._fsrc.pmic_src.low_current = 30, ._fsrc.pmic_src.high_current = 100, ._fsrc.pmic_src.led_src_1 = 0, ._fsrc.pmic_src.led_src_2 = 0, ._fsrc.pmic_src.pmic_set_current = pmic_set_flash_led_current, }; #ifdef CONFIG_MT9D112 static struct msm_camera_sensor_flash_data flash_mt9d112 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src }; static struct msm_camera_sensor_info msm_camera_sensor_mt9d112_data = { .sensor_name = "mt9d112", .sensor_reset = 89, .sensor_pwd = 85, .vcm_pwd = 0, .vcm_enable = 0, .pdata = &msm_camera_device_data, .flash_data = &flash_mt9d112 }; static struct platform_device msm_camera_sensor_mt9d112 = { .name = "msm_camera_mt9d112", .dev = { .platform_data = &msm_camera_sensor_mt9d112_data, }, }; #endif #ifdef CONFIG_S5K3E2FX static struct msm_camera_sensor_flash_data flash_s5k3e2fx = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src }; static struct msm_camera_sensor_info msm_camera_sensor_s5k3e2fx_data = { .sensor_name = "s5k3e2fx", .sensor_reset = 89, .sensor_pwd = 85, .vcm_pwd = 0, .vcm_enable = 0, .pdata = &msm_camera_device_data, 
.flash_data = &flash_s5k3e2fx }; static struct platform_device msm_camera_sensor_s5k3e2fx = { .name = "msm_camera_s5k3e2fx", .dev = { .platform_data = &msm_camera_sensor_s5k3e2fx_data, }, }; #endif #ifdef CONFIG_MT9P012 static struct msm_camera_sensor_flash_data flash_mt9p012 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src }; static struct msm_camera_sensor_info msm_camera_sensor_mt9p012_data = { .sensor_name = "mt9p012", .sensor_reset = 89, .sensor_pwd = 85, .vcm_pwd = 88, .vcm_enable = 0, .pdata = &msm_camera_device_data, .flash_data = &flash_mt9p012 }; static struct platform_device msm_camera_sensor_mt9p012 = { .name = "msm_camera_mt9p012", .dev = { .platform_data = &msm_camera_sensor_mt9p012_data, }, }; #endif #ifdef CONFIG_MT9P012_KM static struct msm_camera_sensor_flash_data flash_mt9p012_km = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src }; static struct msm_camera_sensor_info msm_camera_sensor_mt9p012_km_data = { .sensor_name = "mt9p012_km", .sensor_reset = 89, .sensor_pwd = 85, .vcm_pwd = 88, .vcm_enable = 0, .pdata = &msm_camera_device_data, .flash_data = &flash_mt9p012_km }; static struct platform_device msm_camera_sensor_mt9p012_km = { .name = "msm_camera_mt9p012_km", .dev = { .platform_data = &msm_camera_sensor_mt9p012_km_data, }, }; #endif #ifdef CONFIG_MT9T013 static struct msm_camera_sensor_flash_data flash_mt9t013 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src }; static struct msm_camera_sensor_info msm_camera_sensor_mt9t013_data = { .sensor_name = "mt9t013", .sensor_reset = 89, .sensor_pwd = 85, .vcm_pwd = 0, .vcm_enable = 0, .pdata = &msm_camera_device_data, .flash_data = &flash_mt9t013 }; static struct platform_device msm_camera_sensor_mt9t013 = { .name = "msm_camera_mt9t013", .dev = { .platform_data = &msm_camera_sensor_mt9t013_data, }, }; #endif #ifdef CONFIG_VB6801 static struct msm_camera_sensor_flash_data flash_vb6801 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = 
&msm_flash_src }; static struct msm_camera_sensor_info msm_camera_sensor_vb6801_data = { .sensor_name = "vb6801", .sensor_reset = 89, .sensor_pwd = 88, .vcm_pwd = 0, .vcm_enable = 0, .pdata = &msm_camera_device_data, .flash_data = &flash_vb6801 }; static struct platform_device msm_camera_sensor_vb6801 = { .name = "msm_camera_vb6801", .dev = { .platform_data = &msm_camera_sensor_vb6801_data, }, }; #endif #endif static u32 msm_calculate_batt_capacity(u32 current_voltage); static struct msm_psy_batt_pdata msm_psy_batt_data = { .voltage_min_design = 2800, .voltage_max_design = 4300, .avail_chg_sources = AC_CHG | USB_CHG , .batt_technology = POWER_SUPPLY_TECHNOLOGY_LION, .calculate_capacity = &msm_calculate_batt_capacity, }; static u32 msm_calculate_batt_capacity(u32 current_voltage) { u32 low_voltage = msm_psy_batt_data.voltage_min_design; u32 high_voltage = msm_psy_batt_data.voltage_max_design; return (current_voltage - low_voltage) * 100 / (high_voltage - low_voltage); } static struct platform_device msm_batt_device = { .name = "msm-battery", .id = -1, .dev.platform_data = &msm_psy_batt_data, }; static struct platform_device *devices[] __initdata = { &asoc_msm_pcm, &asoc_msm_dai0, &asoc_msm_dai1, &msm_device_smd, &msm_device_dmov, &msm_device_nand, #ifdef CONFIG_USB_MSM_OTG_72K &msm_device_otg, #ifdef CONFIG_USB_GADGET &msm_device_gadget_peripheral, #endif #endif #ifdef CONFIG_USB_G_ANDROID &android_usb_device, #endif &msm_device_i2c, &smc91x_device, &msm_device_tssc, &android_pmem_device, &android_pmem_adsp_device, &android_pmem_audio_device, &msm_fb_device, &lcdc_gordon_panel_device, &msm_device_uart_dm1, #ifdef CONFIG_BT &msm_bt_power_device, #endif &msm_device_pmic_leds, &msm_device_snd, &msm_device_adspdec, #ifdef CONFIG_MT9T013 &msm_camera_sensor_mt9t013, #endif #ifdef CONFIG_MT9D112 &msm_camera_sensor_mt9d112, #endif #ifdef CONFIG_S5K3E2FX &msm_camera_sensor_s5k3e2fx, #endif #ifdef CONFIG_MT9P012 &msm_camera_sensor_mt9p012, #endif #ifdef CONFIG_MT9P012_KM 
&msm_camera_sensor_mt9p012_km, #endif #ifdef CONFIG_VB6801 &msm_camera_sensor_vb6801, #endif &msm_bluesleep_device, #ifdef CONFIG_ARCH_MSM7X27 &msm_kgsl_3d0, #endif #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) &msm_device_tsif, #endif &hs_device, &msm_batt_device, }; static struct msm_panel_common_pdata mdp_pdata = { .gpio = 97, .mdp_rev = MDP_REV_30, }; static void __init msm_fb_add_devices(void) { msm_fb_register_device("mdp", &mdp_pdata); msm_fb_register_device("pmdh", 0); msm_fb_register_device("lcdc", &lcdc_pdata); } extern struct sys_timer msm_timer; static void __init msm7x2x_init_irq(void) { msm_init_irq(); } void msm_serial_debug_init(unsigned int base, int irq, struct device *clk_device, int signal_irq); #if (defined(CONFIG_MMC_MSM_SDC1_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC2_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC3_SUPPORT)\ || defined(CONFIG_MMC_MSM_SDC4_SUPPORT)) static unsigned long vreg_sts, gpio_sts; static struct regulator *vreg_mmc; static unsigned mpp_mmc = 2; struct sdcc_gpio { struct msm_gpio *cfg_data; uint32_t size; struct msm_gpio *sleep_cfg_data; }; static struct msm_gpio sdc1_cfg_data[] = { {GPIO_CFG(51, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_3"}, {GPIO_CFG(52, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_2"}, {GPIO_CFG(53, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_1"}, {GPIO_CFG(54, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_dat_0"}, {GPIO_CFG(55, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc1_cmd"}, {GPIO_CFG(56, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "sdc1_clk"}, }; static struct msm_gpio sdc2_cfg_data[] = { {GPIO_CFG(62, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "sdc2_clk"}, {GPIO_CFG(63, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_cmd"}, {GPIO_CFG(64, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_3"}, {GPIO_CFG(65, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), 
"sdc2_dat_2"}, {GPIO_CFG(66, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_1"}, {GPIO_CFG(67, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc2_dat_0"}, }; static struct msm_gpio sdc2_sleep_cfg_data[] = { {GPIO_CFG(62, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "sdc2_clk"}, {GPIO_CFG(63, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "sdc2_cmd"}, {GPIO_CFG(64, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "sdc2_dat_3"}, {GPIO_CFG(65, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "sdc2_dat_2"}, {GPIO_CFG(66, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "sdc2_dat_1"}, {GPIO_CFG(67, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), "sdc2_dat_0"}, }; static struct msm_gpio sdc3_cfg_data[] = { {GPIO_CFG(88, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "sdc3_clk"}, {GPIO_CFG(89, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_cmd"}, {GPIO_CFG(90, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_dat_3"}, {GPIO_CFG(91, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_dat_2"}, {GPIO_CFG(92, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_dat_1"}, {GPIO_CFG(93, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc3_dat_0"}, }; static struct msm_gpio sdc4_cfg_data[] = { {GPIO_CFG(19, 3, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_dat_3"}, {GPIO_CFG(20, 3, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_dat_2"}, {GPIO_CFG(21, 4, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_dat_1"}, {GPIO_CFG(107, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_cmd"}, {GPIO_CFG(108, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), "sdc4_dat_0"}, {GPIO_CFG(109, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "sdc4_clk"}, }; static struct sdcc_gpio sdcc_cfg_data[] = { { .cfg_data = sdc1_cfg_data, .size = ARRAY_SIZE(sdc1_cfg_data), .sleep_cfg_data = NULL, }, { .cfg_data = sdc2_cfg_data, .size = ARRAY_SIZE(sdc2_cfg_data), 
.sleep_cfg_data = sdc2_sleep_cfg_data,
	},
	{
		/* SDC3: no distinct sleep configuration. */
		.cfg_data = sdc3_cfg_data,
		.size = ARRAY_SIZE(sdc3_cfg_data),
		.sleep_cfg_data = NULL,
	},
	{
		/* SDC4: no distinct sleep configuration. */
		.cfg_data = sdc4_cfg_data,
		.size = ARRAY_SIZE(sdc4_cfg_data),
		.sleep_cfg_data = NULL,
	},
};

/*
 * msm_sdcc_setup_gpio() - mux the pins of SDCC slot @dev_id (1-based)
 * into their active configuration (@enable != 0) or back to their
 * sleep/free state. The gpio_sts bitmask remembers each slot's current
 * state so repeated calls with the same @enable are no-ops.
 */
static void msm_sdcc_setup_gpio(int dev_id, unsigned int enable)
{
	int rc = 0;
	struct sdcc_gpio *curr;

	curr = &sdcc_cfg_data[dev_id - 1];

	/* XOR: bail out when the slot is already in the requested state. */
	if (!(test_bit(dev_id, &gpio_sts)^enable))
		return;

	if (enable) {
		set_bit(dev_id, &gpio_sts);
		rc = msm_gpios_request_enable(curr->cfg_data, curr->size);
		if (rc)
			printk(KERN_ERR
				"%s: Failed to turn on GPIOs for slot %d\n",
				__func__,  dev_id);
	} else {
		clear_bit(dev_id, &gpio_sts);
		/* Prefer a dedicated sleep config when one is defined. */
		if (curr->sleep_cfg_data) {
			msm_gpios_enable(curr->sleep_cfg_data, curr->size);
			msm_gpios_free(curr->sleep_cfg_data, curr->size);
			return;
		}
		msm_gpios_disable_free(curr->cfg_data, curr->size);
	}
}

/*
 * msm_sdcc_setup_power() - translate_vdd callback for the SDCC slots.
 * @dv:  the slot's platform device (pdev->id selects the slot)
 * @vdd: 0 to power the slot down, non-zero to power it up.
 *
 * vreg_sts is a bitmask of slots that want power; the shared supply
 * (MPP on FFA boards, the "mmc" regulator otherwise) is only toggled
 * on the first slot up / last slot down transition.
 *
 * NOTE(review): vreg_mmc may hold an ERR_PTR if regulator_get() failed
 * in msm7x2x_init_mmc() — confirm that path cannot reach the
 * regulator_* calls below on non-FFA boards.
 *
 * Always returns 0; supply failures are only logged.
 */
static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd)
{
	int rc = 0;
	struct platform_device *pdev;

	pdev = container_of(dv, struct platform_device, dev);
	msm_sdcc_setup_gpio(pdev->id, !!vdd);

	if (vdd == 0) {
		if (!vreg_sts)
			return 0;

		clear_bit(pdev->id, &vreg_sts);

		/* Last user gone: cut the shared supply. */
		if (!vreg_sts) {
			if (machine_is_msm7x25_ffa() ||
					machine_is_msm7x27_ffa()) {
				rc = mpp_config_digital_out(mpp_mmc,
					MPP_CFG(MPP_DLOGIC_LVL_MSMP,
					MPP_DLOGIC_OUT_CTRL_LOW));
			} else
				rc = regulator_disable(vreg_mmc);
			if (rc) {
				pr_err("%s: return val: %d\n",
					__func__, rc);
			}
		}
		return 0;
	}

	/* First user: bring the shared supply up at 2.85 V. */
	if (!vreg_sts) {
		if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa()) {
			rc = mpp_config_digital_out(mpp_mmc,
				MPP_CFG(MPP_DLOGIC_LVL_MSMP,
				MPP_DLOGIC_OUT_CTRL_HIGH));
		} else {
			rc = regulator_set_voltage(vreg_mmc,
					2850000, 2850000);
			if (!rc)
				rc = regulator_enable(vreg_mmc);
		}
		if (rc) {
			pr_err("%s: return val: %d\n",
				__func__, rc);
		}
	}
	set_bit(pdev->id, &vreg_sts);
	return 0;
}

#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
static struct mmc_platform_data msm7x2x_sdc1_data = {
	.ocr_mask	= MMC_VDD_28_29,
	.translate_vdd	= msm_sdcc_setup_power,
	.mmc_bus_width	= MMC_CAP_4_BIT_DATA,
	.msmsdcc_fmin	=
144000, .msmsdcc_fmid = 24576000, .msmsdcc_fmax = 49152000, .nonremovable = 0, }; #endif #ifdef CONFIG_MMC_MSM_SDC2_SUPPORT static struct mmc_platform_data msm7x2x_sdc2_data = { .ocr_mask = MMC_VDD_28_29, .translate_vdd = msm_sdcc_setup_power, .mmc_bus_width = MMC_CAP_4_BIT_DATA, .sdiowakeup_irq = MSM_GPIO_TO_INT(66), .msmsdcc_fmin = 144000, .msmsdcc_fmid = 24576000, .msmsdcc_fmax = 49152000, .nonremovable = 0, }; #endif #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT static struct mmc_platform_data msm7x2x_sdc3_data = { .ocr_mask = MMC_VDD_28_29, .translate_vdd = msm_sdcc_setup_power, .mmc_bus_width = MMC_CAP_4_BIT_DATA, .msmsdcc_fmin = 144000, .msmsdcc_fmid = 24576000, .msmsdcc_fmax = 49152000, .nonremovable = 0, }; #endif #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT static struct mmc_platform_data msm7x2x_sdc4_data = { .ocr_mask = MMC_VDD_28_29, .translate_vdd = msm_sdcc_setup_power, .mmc_bus_width = MMC_CAP_4_BIT_DATA, .msmsdcc_fmin = 144000, .msmsdcc_fmid = 24576000, .msmsdcc_fmax = 49152000, .nonremovable = 0, }; #endif static void __init msm7x2x_init_mmc(void) { if (!machine_is_msm7x25_ffa() && !machine_is_msm7x27_ffa()) { vreg_mmc = regulator_get(NULL, "mmc"); if (IS_ERR(vreg_mmc)) { pr_err("%s: could not get regulator: %ld\n", __func__, PTR_ERR(vreg_mmc)); } } #ifdef CONFIG_MMC_MSM_SDC1_SUPPORT msm_add_sdcc(1, &msm7x2x_sdc1_data); #endif if (machine_is_msm7x25_surf() || machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) { #ifdef CONFIG_MMC_MSM_SDC2_SUPPORT msm_sdcc_setup_gpio(2, 1); msm_add_sdcc(2, &msm7x2x_sdc2_data); #endif } if (machine_is_msm7x25_surf() || machine_is_msm7x27_surf()) { #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT msm_add_sdcc(3, &msm7x2x_sdc3_data); #endif #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT msm_add_sdcc(4, &msm7x2x_sdc4_data); #endif } } #else #define msm7x2x_init_mmc() do {} while (0) #endif static struct msm_pm_platform_data msm7x25_pm_data[MSM_PM_SLEEP_MODE_NR] = { [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency = 16000, 
[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency = 12000, [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency = 2000, }; static struct msm_pm_platform_data msm7x27_pm_data[MSM_PM_SLEEP_MODE_NR] = { [MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = { .idle_supported = 1, .suspend_supported = 1, .idle_enabled = 1, .suspend_enabled = 1, .latency = 16000, .residency = 20000, }, [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = { .idle_supported = 1, .suspend_supported = 1, .idle_enabled = 1, .suspend_enabled = 1, .latency = 12000, .residency = 20000, }, [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT] = { .idle_supported = 1, .suspend_supported = 1, .idle_enabled = 1, .suspend_enabled = 1, .latency = 2000, .residency = 0, }, }; static struct msm_pm_boot_platform_data msm_pm_boot_pdata __initdata = { .mode = MSM_PM_BOOT_CONFIG_RESET_VECTOR_PHYS, .p_addr = 0, }; static void msm_i2c_gpio_config(int iface, int config_type) { int gpio_scl; int gpio_sda; if (iface) { gpio_scl = 95; gpio_sda = 96; } else { gpio_scl = 60; gpio_sda = 61; } if (config_type) { gpio_tlmm_config(GPIO_CFG(gpio_scl, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), GPIO_CFG_ENABLE); gpio_tlmm_config(GPIO_CFG(gpio_sda, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), GPIO_CFG_ENABLE); } else { gpio_tlmm_config(GPIO_CFG(gpio_scl, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), GPIO_CFG_ENABLE); gpio_tlmm_config(GPIO_CFG(gpio_sda, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_16MA), GPIO_CFG_ENABLE); } } static struct msm_i2c_platform_data msm_i2c_pdata = { .clk_freq = 100000, .rmutex = 0, .pri_clk = 60, .pri_dat = 61, .aux_clk = 95, .aux_dat = 96, .msm_i2c_config_gpio = msm_i2c_gpio_config, }; static struct platform_device msm_proccomm_regulator_dev = { .name = PROCCOMM_REGULATOR_DEV_NAME, .id = -1, .dev = { .platform_data = &msm7627_proccomm_regulator_data } }; static void __init msm7627_init_regulators(void) { int rc = platform_device_register(&msm_proccomm_regulator_dev); if 
(rc) pr_err("%s: could not register regulator device: %d\n", __func__, rc); } static void __init msm_device_i2c_init(void) { if (gpio_request(60, "i2c_pri_clk")) pr_err("failed to request gpio i2c_pri_clk\n"); if (gpio_request(61, "i2c_pri_dat")) pr_err("failed to request gpio i2c_pri_dat\n"); if (gpio_request(95, "i2c_sec_clk")) pr_err("failed to request gpio i2c_sec_clk\n"); if (gpio_request(96, "i2c_sec_dat")) pr_err("failed to request gpio i2c_sec_dat\n"); if (cpu_is_msm7x27()) msm_i2c_pdata.pm_lat = msm7x27_pm_data[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] .latency; else msm_i2c_pdata.pm_lat = msm7x25_pm_data[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] .latency; msm_device_i2c.dev.platform_data = &msm_i2c_pdata; } static void usb_mpp_init(void) { unsigned rc; unsigned mpp_usb = 7; if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa()) { rc = mpp_config_digital_out(mpp_usb, MPP_CFG(MPP_DLOGIC_LVL_VDD, MPP_DLOGIC_OUT_CTRL_HIGH)); if (rc) pr_err("%s: configuring mpp pin" "to enable 3.3V LDO failed\n", __func__); } } static void msm7x27_wlan_init(void) { int rc = 0; /* TBD: if (machine_is_msm7x27_ffa_with_wcn1312()) */ if (machine_is_msm7x27_ffa()) { rc = mpp_config_digital_out(3, MPP_CFG(MPP_DLOGIC_LVL_MSMP, MPP_DLOGIC_OUT_CTRL_LOW)); if (rc) printk(KERN_ERR "%s: return val: %d \n", __func__, rc); } } static void __init msm7x2x_init(void) { msm7627_init_regulators(); #ifdef CONFIG_ARCH_MSM7X25 msm_clock_init(msm_clocks_7x25, msm_num_clocks_7x25); #elif defined(CONFIG_ARCH_MSM7X27) msm_clock_init(&msm7x27_clock_init_data); #endif #if defined(CONFIG_SMC91X) if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa()) { smc91x_resources[0].start = 0x98000300; smc91x_resources[0].end = 0x980003ff; smc91x_resources[1].start = MSM_GPIO_TO_INT(85); smc91x_resources[1].end = MSM_GPIO_TO_INT(85); if (gpio_tlmm_config(GPIO_CFG(85, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG_ENABLE)) { printk(KERN_ERR "%s: Err: Config GPIO-85 INT\n", 
__func__); } } #endif acpuclk_init(&acpuclk_7x27_soc_data); usb_mpp_init(); #ifdef CONFIG_USB_MSM_OTG_72K msm_device_otg.dev.platform_data = &msm_otg_pdata; if (machine_is_msm7x25_surf() || machine_is_msm7x25_ffa()) { msm_otg_pdata.pemp_level = PRE_EMPHASIS_WITH_20_PERCENT; msm_otg_pdata.drv_ampl = HS_DRV_AMPLITUDE_5_PERCENT; msm_otg_pdata.cdr_autoreset = CDR_AUTO_RESET_ENABLE; msm_otg_pdata.phy_reset = msm_otg_rpc_phy_reset; } if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) { msm_otg_pdata.pemp_level = PRE_EMPHASIS_WITH_10_PERCENT; msm_otg_pdata.drv_ampl = HS_DRV_AMPLITUDE_5_PERCENT; msm_otg_pdata.cdr_autoreset = CDR_AUTO_RESET_DISABLE; msm_otg_pdata.phy_reset_sig_inverted = 1; } #ifdef CONFIG_USB_GADGET msm_otg_pdata.swfi_latency = msm7x27_pm_data [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency; msm_device_gadget_peripheral.dev.platform_data = &msm_gadget_pdata; msm_gadget_pdata.is_phy_status_timer_on = 1; #endif #endif #if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) msm_device_tsif.dev.platform_data = &tsif_platform_data; #endif platform_add_devices(msm_footswitch_devices, msm_num_footswitch_devices); platform_add_devices(devices, ARRAY_SIZE(devices)); #ifdef CONFIG_MSM_CAMERA config_camera_off_gpios(); /* might not be necessary */ #endif msm_device_i2c_init(); i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices)); #ifdef CONFIG_SURF_FFA_GPIO_KEYPAD if (machine_is_msm7x25_ffa() || machine_is_msm7x27_ffa()) platform_device_register(&keypad_device_7k_ffa); else platform_device_register(&keypad_device_surf); #endif lcdc_gordon_gpio_init(); msm_fb_add_devices(); #ifdef CONFIG_USB_EHCI_MSM_72K msm7x2x_init_host(); #endif msm7x2x_init_mmc(); bt_power_init(); if (cpu_is_msm7x27()) msm_pm_set_platform_data(msm7x27_pm_data, ARRAY_SIZE(msm7x27_pm_data)); else msm_pm_set_platform_data(msm7x25_pm_data, ARRAY_SIZE(msm7x25_pm_data)); BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata)); msm7x27_wlan_init(); } static unsigned 
pmem_kernel_ebi1_size = PMEM_KERNEL_EBI1_SIZE; static int __init pmem_kernel_ebi1_size_setup(char *p) { pmem_kernel_ebi1_size = memparse(p, NULL); return 0; } early_param("pmem_kernel_ebi1_size", pmem_kernel_ebi1_size_setup); static unsigned pmem_mdp_size = MSM_PMEM_MDP_SIZE; static int __init pmem_mdp_size_setup(char *p) { pmem_mdp_size = memparse(p, NULL); return 0; } early_param("pmem_mdp_size", pmem_mdp_size_setup); static unsigned pmem_adsp_size = MSM_PMEM_ADSP_SIZE; static int __init pmem_adsp_size_setup(char *p) { pmem_adsp_size = memparse(p, NULL); return 0; } early_param("pmem_adsp_size", pmem_adsp_size_setup); static unsigned pmem_audio_size = MSM_PMEM_AUDIO_SIZE; static int __init pmem_audio_size_setup(char *p) { pmem_audio_size = memparse(p, NULL); return 0; } early_param("pmem_audio_size", pmem_audio_size_setup); static unsigned fb_size = MSM_FB_SIZE; static int __init fb_size_setup(char *p) { fb_size = memparse(p, NULL); return 0; } early_param("fb_size", fb_size_setup); static void __init msm_msm7x2x_allocate_memory_regions(void) { void *addr; unsigned long size; size = fb_size ? 
: MSM_FB_SIZE; addr = alloc_bootmem_align(size, 0x1000); msm_fb_resources[0].start = __pa(addr); msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1; pr_info("allocating %lu bytes at %p (%lx physical) for fb\n", size, addr, __pa(addr)); } static struct memtype_reserve msm7x27_reserve_table[] __initdata = { [MEMTYPE_SMI] = { }, [MEMTYPE_EBI0] = { .flags = MEMTYPE_FLAGS_1M_ALIGN, }, [MEMTYPE_EBI1] = { .flags = MEMTYPE_FLAGS_1M_ALIGN, }, }; static void __init size_pmem_devices(void) { #ifdef CONFIG_ANDROID_PMEM android_pmem_adsp_pdata.size = pmem_adsp_size; android_pmem_pdata.size = pmem_mdp_size; android_pmem_audio_pdata.size = pmem_audio_size; #endif } static void __init reserve_memory_for(struct android_pmem_platform_data *p) { msm7x27_reserve_table[p->memory_type].size += p->size; } static void __init reserve_pmem_memory(void) { #ifdef CONFIG_ANDROID_PMEM reserve_memory_for(&android_pmem_adsp_pdata); reserve_memory_for(&android_pmem_pdata); reserve_memory_for(&android_pmem_audio_pdata); msm7x27_reserve_table[MEMTYPE_EBI1].size += pmem_kernel_ebi1_size; #endif } static void __init msm7x27_calculate_reserve_sizes(void) { size_pmem_devices(); reserve_pmem_memory(); } static int msm7x27_paddr_to_memtype(unsigned int paddr) { return MEMTYPE_EBI1; } static struct reserve_info msm7x27_reserve_info __initdata = { .memtype_reserve_table = msm7x27_reserve_table, .calculate_reserve_sizes = msm7x27_calculate_reserve_sizes, .paddr_to_memtype = msm7x27_paddr_to_memtype, }; static void __init msm7x27_reserve(void) { reserve_info = &msm7x27_reserve_info; msm_reserve(); } static void __init msm7x27_init_early(void) { msm_msm7x2x_allocate_memory_regions(); } static void __init msm7x2x_map_io(void) { msm_map_common_io(); if (socinfo_init() < 0) BUG(); #ifdef CONFIG_CACHE_L2X0 if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) { /* 7x27 has 256KB L2 cache: 64Kb/Way and 4-Way Associativity; evmon/parity/share disabled. 
*/ if ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) > 1) || ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) && (SOCINFO_VERSION_MINOR(socinfo_get_version()) >= 3))) /* R/W latency: 4 cycles; */ l2x0_init(MSM_L2CC_BASE, 0x0006801B, 0xfe000000); else /* R/W latency: 3 cycles; */ l2x0_init(MSM_L2CC_BASE, 0x00068012, 0xfe000000); } #endif } MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF") .boot_params = PLAT_PHYS_OFFSET + 0x100, .map_io = msm7x2x_map_io, .reserve = msm7x27_reserve, .init_irq = msm7x2x_init_irq, .init_machine = msm7x2x_init, .timer = &msm_timer, .init_early = msm7x27_init_early, .handle_irq = vic_handle_irq, MACHINE_END MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA") .boot_params = PLAT_PHYS_OFFSET + 0x100, .map_io = msm7x2x_map_io, .reserve = msm7x27_reserve, .init_irq = msm7x2x_init_irq, .init_machine = msm7x2x_init, .timer = &msm_timer, .init_early = msm7x27_init_early, .handle_irq = vic_handle_irq, MACHINE_END MACHINE_START(MSM7X25_SURF, "QCT MSM7x25 SURF") .boot_params = PLAT_PHYS_OFFSET + 0x100, .map_io = msm7x2x_map_io, .reserve = msm7x27_reserve, .init_irq = msm7x2x_init_irq, .init_machine = msm7x2x_init, .timer = &msm_timer, .init_early = msm7x27_init_early, .handle_irq = vic_handle_irq, MACHINE_END MACHINE_START(MSM7X25_FFA, "QCT MSM7x25 FFA") .boot_params = PLAT_PHYS_OFFSET + 0x100, .map_io = msm7x2x_map_io, .reserve = msm7x27_reserve, .init_irq = msm7x2x_init_irq, .init_machine = msm7x2x_init, .timer = &msm_timer, .init_early = msm7x27_init_early, .handle_irq = vic_handle_irq, MACHINE_END
gpl-2.0
Luquidtester/DirtyKernel-3x-ION
drivers/staging/msm/memory.c
87
5938
/* arch/arm/mach-msm/memory.c * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2009-2010, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/mm.h> #include <linux/mm_types.h> #include <linux/bootmem.h> #include <linux/module.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/mach/map.h> #include "memory_ll.h" #include <asm/cacheflush.h> #if defined(CONFIG_MSM_NPA_REMOTE) #include "npa_remote.h" #include <linux/completion.h> #include <linux/err.h> #endif int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { unsigned long pfn_addr = pfn << PAGE_SHIFT; /* if ((pfn_addr >= 0x88000000) && (pfn_addr < 0xD0000000)) { prot = pgprot_device(prot); printk("remapping device %lx\n", prot); } */ panic("Memory remap PFN stuff not done\n"); return remap_pfn_range(vma, addr, pfn, size, prot); } void *zero_page_strongly_ordered; static void map_zero_page_strongly_ordered(void) { if (zero_page_strongly_ordered) return; /* zero_page_strongly_ordered = ioremap_strongly_ordered(page_to_pfn(empty_zero_page) << PAGE_SHIFT, PAGE_SIZE); */ panic("Strongly ordered memory functions not implemented\n"); } void write_to_strongly_ordered_memory(void) { map_zero_page_strongly_ordered(); *(int *)zero_page_strongly_ordered = 0; } EXPORT_SYMBOL(write_to_strongly_ordered_memory); void flush_axi_bus_buffer(void) { __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ : : "r" (0) : "memory"); write_to_strongly_ordered_memory(); } #define CACHE_LINE_SIZE 32 /* 
These cache related routines make the assumption that the associated * physical memory is contiguous. They will operate on all (L1 * and L2 if present) caches. */ void clean_and_invalidate_caches(unsigned long vstart, unsigned long length, unsigned long pstart) { unsigned long vaddr; for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE) asm ("mcr p15, 0, %0, c7, c14, 1" : : "r" (vaddr)); #ifdef CONFIG_OUTER_CACHE outer_flush_range(pstart, pstart + length); #endif asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); flush_axi_bus_buffer(); } void clean_caches(unsigned long vstart, unsigned long length, unsigned long pstart) { unsigned long vaddr; for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE) asm ("mcr p15, 0, %0, c7, c10, 1" : : "r" (vaddr)); #ifdef CONFIG_OUTER_CACHE outer_clean_range(pstart, pstart + length); #endif asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); flush_axi_bus_buffer(); } void invalidate_caches(unsigned long vstart, unsigned long length, unsigned long pstart) { unsigned long vaddr; for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE) asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (vaddr)); #ifdef CONFIG_OUTER_CACHE outer_inv_range(pstart, pstart + length); #endif asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0)); asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); flush_axi_bus_buffer(); } void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment) { void *unused_addr = NULL; unsigned long addr, tmp_size, unused_size; /* Allocate maximum size needed, see where it ends up. * Then free it -- in this path there are no other allocators * so we can depend on getting the same address back * when we allocate a smaller piece that is aligned * at the end (if necessary) and the piece we really want, * then free the unused first piece. 
*/ tmp_size = size + alignment - PAGE_SIZE; addr = (unsigned long)alloc_bootmem(tmp_size); free_bootmem(__pa(addr), tmp_size); unused_size = alignment - (addr % alignment); if (unused_size) unused_addr = alloc_bootmem(unused_size); addr = (unsigned long)alloc_bootmem(size); if (unused_size) free_bootmem(__pa(unused_addr), unused_size); return (void *)addr; } #if defined(CONFIG_MSM_NPA_REMOTE) struct npa_client *npa_memory_client; #endif static int change_memory_power_state(unsigned long start_pfn, unsigned long nr_pages, int state) { #if defined(CONFIG_MSM_NPA_REMOTE) static atomic_t node_created_flag = ATOMIC_INIT(1); #else unsigned long start; unsigned long size; unsigned long virtual; #endif int rc = 0; #if defined(CONFIG_MSM_NPA_REMOTE) if (atomic_dec_and_test(&node_created_flag)) { /* Create NPA 'required' client. */ npa_memory_client = npa_create_sync_client(NPA_MEMORY_NODE_NAME, "memory node", NPA_CLIENT_REQUIRED); if (IS_ERR(npa_memory_client)) { rc = PTR_ERR(npa_memory_client); return rc; } } rc = npa_issue_required_request(npa_memory_client, state); #else if (state == MEMORY_DEEP_POWERDOWN) { /* simulate turning off memory by writing bit pattern into it */ start = start_pfn << PAGE_SHIFT; size = nr_pages << PAGE_SHIFT; virtual = __phys_to_virt(start); memset((void *)virtual, 0x27, size); } #endif return rc; } int platform_physical_remove_pages(unsigned long start_pfn, unsigned long nr_pages) { return change_memory_power_state(start_pfn, nr_pages, MEMORY_DEEP_POWERDOWN); } int platform_physical_add_pages(unsigned long start_pfn, unsigned long nr_pages) { return change_memory_power_state(start_pfn, nr_pages, MEMORY_ACTIVE); } int platform_physical_low_power_pages(unsigned long start_pfn, unsigned long nr_pages) { return change_memory_power_state(start_pfn, nr_pages, MEMORY_SELF_REFRESH); }
gpl-2.0
chrisch1974/htc-krait-3.4
drivers/usb/core/notify.c
343
1434
/* * All the USB notify logic * * (C) Copyright 2005 Greg Kroah-Hartman <gregkh@suse.de> * * notifier functions originally based on those in kernel/sys.c * but fixed up to not be so broken. * */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/notifier.h> #include <linux/usb.h> #include <linux/mutex.h> #include "usb.h" static BLOCKING_NOTIFIER_HEAD(usb_notifier_list); void usb_register_notify(struct notifier_block *nb) { blocking_notifier_chain_register(&usb_notifier_list, nb); } EXPORT_SYMBOL_GPL(usb_register_notify); void usb_unregister_notify(struct notifier_block *nb) { blocking_notifier_chain_unregister(&usb_notifier_list, nb); } EXPORT_SYMBOL_GPL(usb_unregister_notify); void usb_notify_add_device(struct usb_device *udev) { blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_ADD, udev); } void usb_notify_remove_device(struct usb_device *udev) { mutex_lock(&usbfs_mutex); blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_REMOVE, udev); mutex_unlock(&usbfs_mutex); } void usb_notify_config_device(struct usb_device *udev) { blocking_notifier_call_chain(&usb_notifier_list, USB_DEVICE_CONFIG, udev); } void usb_notify_add_bus(struct usb_bus *ubus) { blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_ADD, ubus); } void usb_notify_remove_bus(struct usb_bus *ubus) { blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus); }
gpl-2.0
hroark13/prevail_kernel_fixed
arch/sparc/prom/console_32.c
343
1215
/* * console.c: Routines that deal with sending and receiving IO * to/from the current console device using the PROM. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1998 Pete Zaitcev <zaitcev@yahoo.com> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/system.h> #include <linux/string.h> extern void restore_current(void); /* Non blocking put character to console device, returns -1 if * unsuccessful. */ static int prom_nbputchar(const char *buf) { unsigned long flags; int i = -1; spin_lock_irqsave(&prom_lock, flags); switch(prom_vers) { case PROM_V0: i = (*(romvec->pv_nbputchar))(*buf); break; case PROM_V2: case PROM_V3: if ((*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, buf, 0x1) == 1) i = 0; break; default: break; }; restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return i; /* Ugh, we could spin forever on unsupported proms ;( */ } void prom_console_write_buf(const char *buf, int len) { while (len) { int n = prom_nbputchar(buf); if (n) continue; len--; buf++; } }
gpl-2.0
jokersax/Photon-blur-kernel
net/phonet/pn_netlink.c
343
4436
/*
 * File: pn_netlink.c
 *
 * Phonet netlink interface
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/phonet.h>
#include <net/sock.h>
#include <net/phonet/pn_dev.h>

/* Builds one RTM_*ADDR message for @addr on @dev into @skb. */
static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
		     u32 pid, u32 seq, int event);

/*
 * Broadcast an address add/delete @event for @addr on @dev to the
 * RTNLGRP_PHONET_IFADDR multicast group.  On failure the error is
 * recorded for listeners via rtnl_set_sk_err().
 */
void phonet_address_notify(int event, struct net_device *dev, u8 addr)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
			nla_total_size(1), GFP_KERNEL);
	if (skb == NULL)
		goto errout;
	err = fill_addr(skb, dev, addr, 0, 0, event);
	if (err < 0) {
		/* Buffer was sized above; running out of room is a bug. */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, dev_net(dev), 0,
		    RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
}

/* Only one attribute is accepted: the local address, as a single byte. */
static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = {
	[IFA_LOCAL] = { .type = NLA_U8 },
};

/*
 * RTM_NEWADDR/RTM_DELADDR handler: add or delete a Phonet address on a
 * network device.  Requires CAP_SYS_ADMIN and runs under the RTNL lock.
 * Returns 0 on success or a negative errno.
 */
static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct net_device *dev;
	struct ifaddrmsg *ifm;
	int err;
	u8 pnaddr;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy);
	if (err < 0)
		return err;

	ifm = nlmsg_data(nlh);
	if (tb[IFA_LOCAL] == NULL)
		return -EINVAL;
	pnaddr = nla_get_u8(tb[IFA_LOCAL]);
	if (pnaddr & 3)
		/* Phonet addresses only have 6 high-order bits */
		return -EINVAL;

	dev = __dev_get_by_index(net, ifm->ifa_index);
	if (dev == NULL)
		return -ENODEV;

	if (nlh->nlmsg_type == RTM_NEWADDR)
		err = phonet_address_add(dev, pnaddr);
	else
		err = phonet_address_del(dev, pnaddr);
	if (!err)
		/* Tell multicast listeners about the successful change. */
		phonet_address_notify(nlh->nlmsg_type, dev, pnaddr);
	return err;
}

/*
 * Fill @skb with one ifaddrmsg + IFA_LOCAL attribute describing @addr
 * on @dev.  Returns the message length, or -EMSGSIZE if @skb is full
 * (in which case the partial message is cancelled).
 */
static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
		     u32 pid, u32 seq, int event)
{
	struct ifaddrmsg *ifm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifa_family = AF_PHONET;
	ifm->ifa_prefixlen = 0;
	ifm->ifa_flags = IFA_F_PERMANENT;
	ifm->ifa_scope = RT_SCOPE_LINK;
	ifm->ifa_index = dev->ifindex;
	NLA_PUT_U8(skb, IFA_LOCAL, addr);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * RTM_GETADDR dump handler: walk every Phonet device in the namespace
 * and emit one message per configured address.  Resume position across
 * partial dumps is kept in cb->args[0] (device index) and cb->args[1]
 * (address index within that device).
 */
static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct phonet_device_list *pndevs;
	struct phonet_device *pnd;
	int dev_idx = 0, dev_start_idx = cb->args[0];
	int addr_idx = 0, addr_start_idx = cb->args[1];

	pndevs = phonet_device_list(sock_net(skb->sk));
	spin_lock_bh(&pndevs->lock);
	list_for_each_entry(pnd, &pndevs->list, list) {
		u8 addr;

		if (dev_idx > dev_start_idx)
			addr_start_idx = 0;	/* new device: dump all addrs */
		if (dev_idx++ < dev_start_idx)
			continue;		/* already dumped earlier */

		addr_idx = 0;
		/* Addresses are stored as a 64-bit bitmap of 6-bit values;
		 * shift left 2 to recover the on-wire address form. */
		for (addr = find_first_bit(pnd->addrs, 64); addr < 64;
			addr = find_next_bit(pnd->addrs, 64, 1+addr)) {
			if (addr_idx++ < addr_start_idx)
				continue;

			if (fill_addr(skb, pnd->netdev, addr << 2,
				      NETLINK_CB(cb->skb).pid,
					cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0)
				goto out;	/* skb full: resume later */
		}
	}

out:
	spin_unlock_bh(&pndevs->lock);
	cb->args[0] = dev_idx;
	cb->args[1] = addr_idx;

	return skb->len;
}

/*
 * Register the PF_PHONET rtnetlink message handlers.  Called once at
 * module/boot init; returns 0 or the error from the first registration.
 */
int __init phonet_netlink_register(void)
{
	int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL);
	if (err)
		return err;

	/* Further __rtnl_register() cannot fail */
	__rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL);
	__rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit);
	return 0;
}
gpl-2.0
securecrt/linux-leo
drivers/net/xen-netfront.c
343
45787
/* * Virtual network driver for conversing with remote driver backends. * * Copyright (c) 2002-2005, K A Fraser * Copyright (c) 2005, XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/if_ether.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/moduleparam.h> #include <linux/mm.h> #include <net/ip.h> #include <xen/xenbus.h> #include <xen/events.h> #include <xen/page.h> #include <xen/grant_table.h> #include <xen/interface/io/netif.h> #include <xen/interface/memory.h> #include <xen/interface/grant_table.h> static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { struct page *page; unsigned offset; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) #define RX_COPY_THRESHOLD 256 #define GRANT_INVALID_REF 0 #define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) #define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) struct netfront_info { struct list_head list; struct net_device *netdev; struct napi_struct napi; unsigned int evtchn; struct xenbus_device *xbdev; spinlock_t tx_lock; struct xen_netif_tx_front_ring tx; int tx_ring_ref; /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries * are linked from tx_skb_freelist through skb_entry.link. * * NB. Freelist index entries are always going to be less than * PAGE_OFFSET, whereas pointers to skbs will always be equal or * greater than PAGE_OFFSET: we use this property to distinguish * them. */ union skb_entry { struct sk_buff *skb; unsigned long link; } tx_skbs[NET_TX_RING_SIZE]; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; spinlock_t rx_lock ____cacheline_aligned_in_smp; struct xen_netif_rx_front_ring rx; int rx_ring_ref; /* Receive-ring batched refills. 
*/ #define RX_MIN_TARGET 8 #define RX_DFL_MIN_TARGET 64 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) unsigned rx_min_target, rx_max_target, rx_target; struct sk_buff_head rx_batch; struct timer_list rx_refill_timer; struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE]; }; struct netfront_rx_info { struct xen_netif_rx_response rx; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; }; static void skb_entry_set_link(union skb_entry *list, unsigned short id) { list->link = id; } static int skb_entry_is_link(const union skb_entry *list) { BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); return ((unsigned long)list->skb < PAGE_OFFSET); } /* * Access macros for acquiring freeing slots in tx_skbs[]. */ static void add_id_to_freelist(unsigned *head, union skb_entry *list, unsigned short id) { skb_entry_set_link(&list[id], *head); *head = id; } static unsigned short get_id_from_freelist(unsigned *head, union skb_entry *list) { unsigned int id = *head; *head = list[id].link; return id; } static int xennet_rxidx(RING_IDX idx) { return idx & (NET_RX_RING_SIZE - 1); } static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); struct sk_buff *skb = np->rx_skbs[i]; np->rx_skbs[i] = NULL; return skb; } static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, RING_IDX ri) { int i = xennet_rxidx(ri); grant_ref_t ref = np->grant_rx_ref[i]; np->grant_rx_ref[i] = GRANT_INVALID_REF; return ref; } #ifdef CONFIG_SYSFS static int xennet_sysfs_addif(struct net_device *netdev); static void xennet_sysfs_delif(struct net_device *netdev); #else /* !CONFIG_SYSFS */ #define xennet_sysfs_addif(dev) (0) #define xennet_sysfs_delif(dev) do { } while (0) #endif static int xennet_can_sg(struct net_device *dev) { return 
dev->features & NETIF_F_SG; } static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netfront_info *np = netdev_priv(dev); napi_schedule(&np->napi); } static int netfront_tx_slot_available(struct netfront_info *np) { return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); } static void xennet_maybe_wake_tx(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); if (unlikely(netif_queue_stopped(dev)) && netfront_tx_slot_available(np) && likely(netif_running(dev))) netif_wake_queue(dev); } static void xennet_alloc_rx_buffers(struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); struct sk_buff *skb; struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; grant_ref_t ref; unsigned long pfn; void *vaddr; struct xen_netif_rx_request *req; if (unlikely(!netif_carrier_ok(dev))) return; /* * Allocate skbuffs greedily, even though we batch updates to the * receive ring. This creates a less bursty demand on the memory * allocator, so should reduce the chance of failed allocation requests * both for ourself and for other kernel subsystems. */ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) goto no_skb; /* Align ip header to a 16 bytes boundary */ skb_reserve(skb, NET_IP_ALIGN); page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); if (!page) { kfree_skb(skb); no_skb: /* Any skbuffs queued for refill? Force them out. */ if (i != 0) goto refill; /* Could not allocate any skbuffs. Try again later. */ mod_timer(&np->rx_refill_timer, jiffies + (HZ/10)); break; } skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->nr_frags = 1; __skb_queue_tail(&np->rx_batch, skb); } /* Is the batch large enough to be worthwhile? 
 */
	/* Batch still small: only push now if the backend has seen fewer
	 * requests than we have already queued; otherwise wait for more. */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	/* Place each batched skb in a free rx ring slot and grant the
	 * backend access to its fragment page. */
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}

/* ndo_open: enable NAPI, refill the rx ring if the backend is already
 * connected, and start the transmit queue. */
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}

/* Reclaim tx ring slots for requests the backend has responded to:
 * end each grant, return the slot id to the freelist and free the skb.
 * Caller holds np->tx_lock. */
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist,
					   np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}

/* Queue additional tx requests for an skb whose linear data crosses
 * page boundaries and/or has paged fragments.  'tx' is the request for
 * the first chunk; every extra chunk grants the backend read-only
 * access to its page. */
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(frag->page));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = frag->size;
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}

/* ndo_start_xmit: grant the backend access to the skb's data, queue
 * the request(s) -- plus an optional GSO extra-info slot -- on the tx
 * ring, and notify the backend if required. */
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);

	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irq(&np->tx_lock);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(dev, skb))) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= NETTXF_data_validated;

	/* GSO packets need an extra-info slot immediately after the
	 * first request describing the segment size and type. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irq(&np->tx_lock);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* ndo_stop: stop the transmit queue and disable NAPI polling. */
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}

/* Re-offer an rx skb/grant pair on the next free rx ring slot (used
 * when a response could not be consumed). */
static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

/* Consume the chain of extra-info slots following an rx response,
 * copying each into 'extras' indexed by (type - 1).  The skb/grant of
 * every consumed slot is recycled.  Returns 0 or -errno. */
static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		/* Extra slots carry no packet data; recycle the slot. */
		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}

/* Collect the full response chain (extras plus data fragments) for one
 * incoming packet, ending each grant and queueing the fragment skbs on
 * 'list'.  Returns 0 or -errno; on error the consumer index is advanced
 * past the bad chain. */
static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	return err;
}

/* Apply a GSO extra-info slot to 'skb'.  Only TCPv4 GSO is supported;
 * returns 0 or -EINVAL. */
static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			printk(KERN_WARNING "Bad GSO type %d.\n",
			       gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. 
 */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

/* Append the queued fragment skbs' pages to 'skb' as paged frags,
 * consuming one ring response per fragment.  Returns the index of the
 * last response consumed. */
static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	skb_frag_t *frag = shinfo->frags + nr_frags;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);

		frag->page = skb_shinfo(nskb)->frags[0].page;
		frag->page_offset = rx->offset;
		frag->size = rx->status;

		skb->data_len += rx->status;

		/* The page now belongs to 'skb'; free the carrier skb
		 * without releasing the page. */
		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		frag++;
		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}

/* Fill in csum_start/csum_offset for a CHECKSUM_PARTIAL IPv4 packet so
 * the stack can complete the checksum later.  Returns 0 or -EPROTO. */
static int skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", iph->protocol);
		goto out;
	}

	/* The checksum field itself must lie inside the buffer. */
	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}

/* Copy each received packet's header into its linear area, finish
 * checksum setup, and hand the packet to the stack.  Returns the
 * number of packets dropped. */
static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		struct page *page = NETFRONT_SKB_CB(skb)->page;
		void *vaddr = page_address(page);
		unsigned offset = NETFRONT_SKB_CB(skb)->offset;

		memcpy(skb->data, vaddr + offset,
		       skb_headlen(skb));

		if (page != skb_shinfo(skb)->frags[0].page)
			__free_page(page);

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb_checksum_setup(skb)) {
				kfree_skb(skb);
				packets_dropped++;
				dev->stats.rx_errors++;
				continue;
			}
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;

		/* Pass it up. */
		netif_receive_skb(skb);
	}

	return packets_dropped;
}

/* NAPI poll handler: consume up to 'budget' rx responses, deliver the
 * resulting packets, refill the ring, and complete NAPI when no more
 * responses are pending.  Runs under np->rx_lock. */
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np =
		container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	unsigned int len;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
		NETFRONT_SKB_CB(skb)->offset = rx->offset;

		len = rx->status;
		if (len > RX_COPY_THRESHOLD)
			len = RX_COPY_THRESHOLD;
		skb_put(skb, len);

		/* Data beyond the copy threshold stays in the frag page. */
		if (rx->status > len) {
			skb_shinfo(skb)->frags[0].page_offset =
				rx->offset + len;
			skb_shinfo(skb)->frags[0].size = rx->status - len;
			skb->data_len = rx->status - len;
		} else {
			skb_shinfo(skb)->frags[0].page = NULL;
			skb_shinfo(skb)->nr_frags = 0;
		}

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize approximates the size of true data plus
		 * any supervisor overheads. Adding hypervisor
		 * overheads has been shown to significantly reduce
		 * achievable bandwidth with the default receive
		 * buffer size. It is therefore not wise to account
		 * for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
		 * to RX_COPY_THRESHOLD + the supervisor
		 * overheads. Here, we add the size of the data pulled
		 * in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main
		 * data area by subtracting (RX_COPY_THRESHOLD -
		 * len). This is especially important with drivers
		 * which split incoming packets into header and data,
		 * using only 66 bytes of the main data area (see the
		 * e1000 driver for example.) On such systems,
		 * without this last adjustment, our achievable
		 * receive throughput using the standard receive
		 * buffer size was cut by 25%(!!!).
		 */
		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
		skb->len += skb->data_len;

		if (rx->flags & NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. 
 */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}

/* ndo_change_mtu: cap the MTU at what the ring can carry, which
 * depends on whether scatter-gather is available. */
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

/* Release every grant and skb still held by the tx ring. */
static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&np->tx_skbs[i]))
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

/* Release rx-side buffers.  NOTE: deliberately a stub -- it warns and
 * returns immediately.  Everything after the early 'return' is dead
 * flip-mode (page transfer) code kept until it is reworked for the
 * copying receive path. */
static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
			 __func__);
	return;

	skb_queue_head_init(&free_list);
	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			struct page *page = skb_shinfo(skb)->frags[0].page;
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 NULL, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	__skb_queue_purge(&free_list);

	spin_unlock_bh(&np->rx_lock);
}

/* ndo_uninit: release all ring buffers and grant references. */
static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_uninit          = xennet_uninit,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu      = xennet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

/* Allocate and initialise the net_device and its netfront_info:
 * locks, rx fill targets, refill timer, tx/rx bookkeeping arrays and a
 * pre-allocated grant reference for every ring slot. */
static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev) {
		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
		       __func__);
		return ERR_PTR(-ENOMEM);
	}

	np                   = netdev_priv(netdev);
	np->xbdev            = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&np->tx_skbs[i], i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->netdev_ops	= &xennet_netdev_ops;

	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->features        = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
				    const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

/* Tear down the shared rings, event channel and irq so the interface
 * can be rebuilt (e.g. across suspend/resume). */
static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->netdev->irq)
		unbind_from_irqhandler(info->netdev->irq, info->netdev);
	info->evtchn = info->netdev->irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

/* Parse the colon-separated MAC address published in this device's
 * xenstore "mac" node into 'mac'.  Returns 0 or -errno. */
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}

/* Event-channel interrupt: garbage-collect completed tx requests and
 * kick NAPI if rx responses are pending. */
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		xennet_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}

/* Allocate and grant the tx/rx shared rings, allocate the event
 * channel and bind its irq.  On failure the partially-granted state is
 * left for xennet_disconnect_backend() to clean up (callers go through
 * talk_to_backend's destroy_ring path). */
static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					IRQF_SAMPLE_RANDOM, netdev->name,
					netdev);
	if (err < 0)
		goto fail;
	netdev->irq = err;
	return 0;

 fail:
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	/* Publish ring references, event channel and feature flags in
	 * xenstore so the backend can connect. */
	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}

/* ethtool set_sg: allow scatter-gather only if the backend advertises
 * "feature-sg"; when disabling, clamp the MTU back to a single page's
 * worth of data. */
static int xennet_set_sg(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	} else if (dev->mtu > ETH_DATA_LEN)
		dev->mtu = ETH_DATA_LEN;

	return ethtool_op_set_sg(dev, data);
}

/* ethtool set_tso: allow TSO only if the backend advertises
 * "feature-gso-tcpv4". */
static int xennet_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	}

	return ethtool_op_set_tso(dev, data);
}

/* Negotiate SG/TSO with the backend after (re)connecting. */
static void xennet_set_features(struct net_device *dev)
{
	/* Turn off all GSO bits except ROBUST. */
	dev->features &= ~NETIF_F_GSO_MASK;
	dev->features |= NETIF_F_GSO_ROBUST;
	xennet_set_sg(dev, 0);

	/* We need checksum offload to enable scatter/gather and TSO. */
	if (!(dev->features & NETIF_F_IP_CSUM))
		return;

	if (!xennet_set_sg(dev, 1))
		xennet_set_tso(dev, 1);
}

/* (Re)connect to the backend: verify rx-copy support, set up rings and
 * features, then rebuild the rx ring from any surviving buffers. */
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_backend(np->xbdev, np);
	if (err)
		return err;

	xennet_set_features(dev);

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. 
*/ xennet_release_tx_bufs(np); /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { if (!np->rx_skbs[i]) continue; skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); req = RING_GET_REQUEST(&np->rx, requeue_idx); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> frags->page)), 0); req->gref = ref; req->id = requeue_idx; requeue_idx++; } np->rx.req_prod_pvt = requeue_idx; /* * Step 3: All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver * domain a kick because we've probably just requeued some * packets. */ netif_carrier_on(np->netdev); notify_remote_via_irq(np->netdev->irq); xennet_tx_buf_gc(dev); xennet_alloc_rx_buffers(dev); spin_unlock_irq(&np->tx_lock); spin_unlock_bh(&np->rx_lock); return 0; } /** * Callback received when the backend's state changes. 
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		/* Connect only on the first InitWait transition. */
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		/* NOTE(review): closes unconditionally on any Closing
		 * transition -- confirm this is intended for all frontend
		 * states. */
		xenbus_frontend_closed(dev);
		break;
	}
}

static const struct ethtool_ops xennet_ethtool_ops =
{
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = xennet_set_sg,
	.set_tso = xennet_set_tso,
	.get_link = ethtool_op_get_link,
};

#ifdef CONFIG_SYSFS
/* sysfs rxbuf_min: show the minimum rx buffer fill target. */
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}

/* sysfs rxbuf_min: set the minimum fill target (clamped to
 * [RX_MIN_TARGET, RX_MAX_TARGET]), raising the max/current targets if
 * needed, then refill the ring. */
static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

/* sysfs rxbuf_max: show the maximum rx buffer fill target. */
static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}

/* sysfs rxbuf_max: set the maximum fill target (clamped to
 * [RX_MIN_TARGET, RX_MAX_TARGET]), lowering the min/current targets if
 * needed, then refill the ring. */
static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

/* sysfs rxbuf_cur: show the current rx buffer fill target. */
static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

/* Create the rxbuf_* sysfs attributes; on failure remove any already
 * created.  Returns 0 or -errno. */
static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}

/* Remove the rxbuf_* sysfs attributes. */
static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */

static struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};


/* Device removal: unregister, disconnect from the backend and free
 * everything allocated in netfront_probe(). */
static int __devexit xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	unregister_netdev(info->netdev);

	xennet_disconnect_backend(info);

	del_timer_sync(&info->rx_refill_timer);

	xennet_sysfs_delif(info->netdev);

	free_netdev(info->netdev);

	return 0;
}

static struct xenbus_driver netfront_driver = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = __devexit_p(xennet_remove),
	.resume = netfront_resume,
	.otherend_changed = backend_changed,
};

/* Module init: register the frontend only when running as a Xen
 * guest (not in the initial/control domain). */
static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (xen_initial_domain())
		return 0;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);


/* Module exit: unregister the frontend (no-op in the initial domain,
 * mirroring netif_init). */
static void __exit netif_exit(void)
{
	if (xen_initial_domain())
		return;

	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");
gpl-2.0
ryanli/kernel_huawei_c8650
drivers/char/rtc.c
855
34292
/* * Real Time Clock interface for Linux * * Copyright (C) 1996 Paul Gortmaker * * This driver allows use of the real time clock (built into * nearly all computers) from user space. It exports the /dev/rtc * interface supporting various ioctl() and also the * /proc/driver/rtc pseudo-file for status information. * * The ioctls can be used to set the interrupt behaviour and * generation rate from the RTC via IRQ 8. Then the /dev/rtc * interface can be used to make use of these timer interrupts, * be they interval or alarm based. * * The /dev/rtc interface will block on reads until an interrupt * has been received. If a RTC interrupt has already happened, * it will output an unsigned long and then block. The output value * contains the interrupt status in the low byte and the number of * interrupts since the last read in the remaining high bytes. The * /dev/rtc interface can also be used with the select(2) call. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Based on other minimal char device drivers, like Alan's * watchdog, Ted's random, etc. etc. * * 1.07 Paul Gortmaker. * 1.08 Miquel van Smoorenburg: disallow certain things on the * DEC Alpha as the CMOS clock is also used for other things. * 1.09 Nikita Schmidt: epoch support and some Alpha cleanup. * 1.09a Pete Zaitcev: Sun SPARC * 1.09b Jeff Garzik: Modularize, init cleanup * 1.09c Jeff Garzik: SMP cleanup * 1.10 Paul Barton-Davis: add support for async I/O * 1.10a Andrea Arcangeli: Alpha updates * 1.10b Andrew Morton: SMP lock fix * 1.10c Cesar Barros: SMP locking fixes and cleanup * 1.10d Paul Gortmaker: delete paranoia check in rtc_exit * 1.10e Maciej W. Rozycki: Handle DECstation's year weirdness. 
* 1.11 Takashi Iwai: Kernel access functions * rtc_register/rtc_unregister/rtc_control * 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init * 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer * CONFIG_HPET_EMULATE_RTC * 1.12a Maciej W. Rozycki: Handle memory-mapped chips properly. * 1.12ac Alan Cox: Allow read access to the day of week register * 1.12b David John: Remove calls to the BKL. */ #define RTC_VERSION "1.12b" /* * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with * interrupts disabled. Due to the index-port/data-port (0x70/0x71) * design of the RTC, we don't want two different things trying to * get to it at once. (e.g. the periodic 11 min sync from time.c vs. * this driver.) */ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/mc146818rtc.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/sysctl.h> #include <linux/wait.h> #include <linux/bcd.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <asm/current.h> #include <asm/system.h> #ifdef CONFIG_X86 #include <asm/hpet.h> #endif #ifdef CONFIG_SPARC32 #include <linux/of.h> #include <linux/of_device.h> #include <asm/io.h> static unsigned long rtc_port; static int rtc_irq; #endif #ifdef CONFIG_HPET_EMULATE_RTC #undef RTC_IRQ #endif #ifdef RTC_IRQ static int rtc_has_irq = 1; #endif #ifndef CONFIG_HPET_EMULATE_RTC #define is_hpet_enabled() 0 #define hpet_set_alarm_time(hrs, min, sec) 0 #define hpet_set_periodic_freq(arg) 0 #define hpet_mask_rtc_irq_bit(arg) 0 #define hpet_set_rtc_irq_bit(arg) 0 #define hpet_rtc_timer_init() do { } while (0) #define hpet_rtc_dropped_irq() 0 #define hpet_register_irq_handler(h) ({ 0; }) #define hpet_unregister_irq_handler(h) ({ 0; }) #ifdef RTC_IRQ 
/* Stub used when HPET RTC emulation is compiled out: reports the
 * interrupt as not handled (never actually invoked in that config). */
static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	return 0;
}
#endif
#endif

/*
 * We sponge a minor off of the misc major. No need slurping
 * up another valuable major dev number for this. If you add
 * an ioctl, make sure you don't conflict with SPARC's RTC
 * ioctls.
 */

static struct fasync_struct *rtc_async_queue;	/* SIGIO subscribers */

static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);	/* readers blocked in rtc_read() */

#ifdef RTC_IRQ
static void rtc_dropped_irq(unsigned long data);

/* Watchdog timer that recovers the chip when a periodic IRQ gets lost. */
static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0);
#endif

static ssize_t rtc_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos);

static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

static void rtc_get_rtc_time(struct rtc_time *rtc_tm);

#ifdef RTC_IRQ
static unsigned int rtc_poll(struct file *file, poll_table *wait);
#endif

static void get_rtc_alm_time(struct rtc_time *alm_tm);
#ifdef RTC_IRQ
static void set_rtc_irq_bit_locked(unsigned char bit);
static void mask_rtc_irq_bit_locked(unsigned char bit);

/* Enable an RTC interrupt source; takes rtc_lock around the chip access. */
static inline void set_rtc_irq_bit(unsigned char bit)
{
	spin_lock_irq(&rtc_lock);
	set_rtc_irq_bit_locked(bit);
	spin_unlock_irq(&rtc_lock);
}

/* Disable an RTC interrupt source; takes rtc_lock around the chip access. */
static void mask_rtc_irq_bit(unsigned char bit)
{
	spin_lock_irq(&rtc_lock);
	mask_rtc_irq_bit_locked(bit);
	spin_unlock_irq(&rtc_lock);
}
#endif

#ifdef CONFIG_PROC_FS
static int rtc_proc_open(struct inode *inode, struct file *file);
#endif

/*
 * Bits in rtc_status. (6 bits of room for future expansion)
 */

#define RTC_IS_OPEN		0x01	/* means /dev/rtc is in use	*/
#define RTC_TIMER_ON		0x02	/* missed irq timer active	*/

/*
 * rtc_status is never changed by rtc_interrupt, and ioctl/open/close is
 * protected by the spin lock rtc_lock. However, ioctl can still disable the
 * timer in rtc_status and then with del_timer after the interrupt has read
 * rtc_status but before mod_timer is called, which would then reenable the
 * timer (but you would need to have an awful timing before you'd trip on it)
 */
static unsigned long rtc_status;	/* bitmapped status byte.
*/
static unsigned long rtc_freq;		/* Current periodic IRQ rate	*/
static unsigned long rtc_irq_data;	/* our output to the world	*/
static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */

#ifdef RTC_IRQ
/*
 * rtc_task_lock nests inside rtc_lock.  It guards rtc_callback, the
 * single in-kernel consumer registered through rtc_register().
 */
static DEFINE_SPINLOCK(rtc_task_lock);
static rtc_task_t *rtc_callback;
#endif

/*
 * If this driver ever becomes modularised, it will be really nice
 * to make the epoch retain its value across module reload...
 */
static unsigned long epoch = 1900;	/* year corresponding to 0x00	*/

/* Days per month, 1-based; February's leap day is added by callers. */
static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};

/*
 * Returns true if a clock update is in progress: samples the UIP flag
 * of the freq-select register under rtc_lock.
 */
static inline unsigned char rtc_is_updating(void)
{
	unsigned long flags;
	unsigned char uip;

	spin_lock_irqsave(&rtc_lock, flags);
	uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
	spin_unlock_irqrestore(&rtc_lock, flags);
	return uip;
}

#ifdef RTC_IRQ
/*
 * A very tiny interrupt handler. It runs with IRQF_DISABLED set,
 * but there is possibility of conflicting with the set_rtc_mmss()
 * call (the rtc irq and the timer irq can easily run at the same
 * time in two different CPUs). So we need to serialize
 * accesses to the chip with the rtc_lock spinlock that each
 * architecture should implement in the timer code.
 * (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.)
 */
static irqreturn_t rtc_interrupt(int irq, void *dev_id)
{
	/*
	 * Can be an alarm interrupt, update complete interrupt,
	 * or a periodic interrupt. We store the status in the
	 * low byte and the number of interrupts received since
	 * the last read in the remainder of rtc_irq_data.
	 */
	spin_lock(&rtc_lock);
	rtc_irq_data += 0x100;
	rtc_irq_data &= ~0xff;
	if (is_hpet_enabled()) {
		/*
		 * In this case it is HPET RTC interrupt handler
		 * calling us, with the interrupt information
		 * passed as arg1, instead of irq.
*/ rtc_irq_data |= (unsigned long)irq & 0xF0; } else { rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); } if (rtc_status & RTC_TIMER_ON) mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); spin_unlock(&rtc_lock); /* Now do the rest of the actions */ spin_lock(&rtc_task_lock); if (rtc_callback) rtc_callback->func(rtc_callback->private_data); spin_unlock(&rtc_task_lock); wake_up_interruptible(&rtc_wait); kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); return IRQ_HANDLED; } #endif /* * sysctl-tuning infrastructure. */ static ctl_table rtc_table[] = { { .procname = "max-user-freq", .data = &rtc_max_user_freq, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; static ctl_table rtc_root[] = { { .procname = "rtc", .mode = 0555, .child = rtc_table, }, { } }; static ctl_table dev_root[] = { { .procname = "dev", .mode = 0555, .child = rtc_root, }, { } }; static struct ctl_table_header *sysctl_header; static int __init init_sysctl(void) { sysctl_header = register_sysctl_table(dev_root); return 0; } static void __exit cleanup_sysctl(void) { unregister_sysctl_table(sysctl_header); } /* * Now all the various file operations that we export. */ static ssize_t rtc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { #ifndef RTC_IRQ return -EIO; #else DECLARE_WAITQUEUE(wait, current); unsigned long data; ssize_t retval; if (rtc_has_irq == 0) return -EIO; /* * Historically this function used to assume that sizeof(unsigned long) * is the same in userspace and kernelspace. This lead to problems * for configurations with multiple ABIs such a the MIPS o32 and 64 * ABIs supported on the same kernel. So now we support read of both * 4 and 8 bytes and assume that's the sizeof(unsigned long) in the * userspace ABI. */ if (count != sizeof(unsigned int) && count != sizeof(unsigned long)) return -EINVAL; add_wait_queue(&rtc_wait, &wait); do { /* First make it right. Then make it fast. 
Putting this whole * block within the parentheses of a while would be too * confusing. And no, xchg() is not the answer. */ __set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&rtc_lock); data = rtc_irq_data; rtc_irq_data = 0; spin_unlock_irq(&rtc_lock); if (data != 0) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto out; } if (signal_pending(current)) { retval = -ERESTARTSYS; goto out; } schedule(); } while (1); if (count == sizeof(unsigned int)) { retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int); } else { retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long); } if (!retval) retval = count; out: __set_current_state(TASK_RUNNING); remove_wait_queue(&rtc_wait, &wait); return retval; #endif } static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) { struct rtc_time wtime; #ifdef RTC_IRQ if (rtc_has_irq == 0) { switch (cmd) { case RTC_AIE_OFF: case RTC_AIE_ON: case RTC_PIE_OFF: case RTC_PIE_ON: case RTC_UIE_OFF: case RTC_UIE_ON: case RTC_IRQP_READ: case RTC_IRQP_SET: return -EINVAL; }; } #endif switch (cmd) { #ifdef RTC_IRQ case RTC_AIE_OFF: /* Mask alarm int. enab. bit */ { mask_rtc_irq_bit(RTC_AIE); return 0; } case RTC_AIE_ON: /* Allow alarm interrupts. */ { set_rtc_irq_bit(RTC_AIE); return 0; } case RTC_PIE_OFF: /* Mask periodic int. enab. bit */ { /* can be called from isr via rtc_control() */ unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); mask_rtc_irq_bit_locked(RTC_PIE); if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } spin_unlock_irqrestore(&rtc_lock, flags); return 0; } case RTC_PIE_ON: /* Allow periodic ints */ { /* can be called from isr via rtc_control() */ unsigned long flags; /* * We don't really want Joe User enabling more * than 64Hz of interrupts on a multi-user machine. 
*/ if (!kernel && (rtc_freq > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE))) return -EACCES; spin_lock_irqsave(&rtc_lock, flags); if (!(rtc_status & RTC_TIMER_ON)) { mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); rtc_status |= RTC_TIMER_ON; } set_rtc_irq_bit_locked(RTC_PIE); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } case RTC_UIE_OFF: /* Mask ints from RTC updates. */ { mask_rtc_irq_bit(RTC_UIE); return 0; } case RTC_UIE_ON: /* Allow ints for RTC updates. */ { set_rtc_irq_bit(RTC_UIE); return 0; } #endif case RTC_ALM_READ: /* Read the present alarm time */ { /* * This returns a struct rtc_time. Reading >= 0xc0 * means "don't care" or "match all". Only the tm_hour, * tm_min, and tm_sec values are filled in. */ memset(&wtime, 0, sizeof(struct rtc_time)); get_rtc_alm_time(&wtime); break; } case RTC_ALM_SET: /* Store a time into the alarm */ { /* * This expects a struct rtc_time. Writing 0xff means * "don't care" or "match all". Only the tm_hour, * tm_min and tm_sec are used. 
*/ unsigned char hrs, min, sec; struct rtc_time alm_tm; if (copy_from_user(&alm_tm, (struct rtc_time __user *)arg, sizeof(struct rtc_time))) return -EFAULT; hrs = alm_tm.tm_hour; min = alm_tm.tm_min; sec = alm_tm.tm_sec; spin_lock_irq(&rtc_lock); if (hpet_set_alarm_time(hrs, min, sec)) { /* * Fallthru and set alarm time in CMOS too, * so that we will get proper value in RTC_ALM_READ */ } if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { if (sec < 60) sec = bin2bcd(sec); else sec = 0xff; if (min < 60) min = bin2bcd(min); else min = 0xff; if (hrs < 24) hrs = bin2bcd(hrs); else hrs = 0xff; } CMOS_WRITE(hrs, RTC_HOURS_ALARM); CMOS_WRITE(min, RTC_MINUTES_ALARM); CMOS_WRITE(sec, RTC_SECONDS_ALARM); spin_unlock_irq(&rtc_lock); return 0; } case RTC_RD_TIME: /* Read the time/date from RTC */ { memset(&wtime, 0, sizeof(struct rtc_time)); rtc_get_rtc_time(&wtime); break; } case RTC_SET_TIME: /* Set the RTC */ { struct rtc_time rtc_tm; unsigned char mon, day, hrs, min, sec, leap_yr; unsigned char save_control, save_freq_select; unsigned int yrs; #ifdef CONFIG_MACH_DECSTATION unsigned int real_yrs; #endif if (!capable(CAP_SYS_TIME)) return -EACCES; if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg, sizeof(struct rtc_time))) return -EFAULT; yrs = rtc_tm.tm_year + 1900; mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */ day = rtc_tm.tm_mday; hrs = rtc_tm.tm_hour; min = rtc_tm.tm_min; sec = rtc_tm.tm_sec; if (yrs < 1970) return -EINVAL; leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400)); if ((mon > 12) || (day == 0)) return -EINVAL; if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) return -EINVAL; if ((hrs >= 24) || (min >= 60) || (sec >= 60)) return -EINVAL; yrs -= epoch; if (yrs > 255) /* They are unsigned */ return -EINVAL; spin_lock_irq(&rtc_lock); #ifdef CONFIG_MACH_DECSTATION real_yrs = yrs; yrs = 72; /* * We want to keep the year set to 73 until March * for non-leap years, so that Feb, 29th is handled * correctly. 
*/ if (!leap_yr && mon < 3) { real_yrs--; yrs = 73; } #endif /* These limits and adjustments are independent of * whether the chip is in binary mode or not. */ if (yrs > 169) { spin_unlock_irq(&rtc_lock); return -EINVAL; } if (yrs >= 100) yrs -= 100; if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { sec = bin2bcd(sec); min = bin2bcd(min); hrs = bin2bcd(hrs); day = bin2bcd(day); mon = bin2bcd(mon); yrs = bin2bcd(yrs); } save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); #endif CMOS_WRITE(yrs, RTC_YEAR); CMOS_WRITE(mon, RTC_MONTH); CMOS_WRITE(day, RTC_DAY_OF_MONTH); CMOS_WRITE(hrs, RTC_HOURS); CMOS_WRITE(min, RTC_MINUTES); CMOS_WRITE(sec, RTC_SECONDS); CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); spin_unlock_irq(&rtc_lock); return 0; } #ifdef RTC_IRQ case RTC_IRQP_READ: /* Read the periodic IRQ rate. */ { return put_user(rtc_freq, (unsigned long __user *)arg); } case RTC_IRQP_SET: /* Set periodic IRQ rate. */ { int tmp = 0; unsigned char val; /* can be called from isr via rtc_control() */ unsigned long flags; /* * The max we can do is 8192Hz. */ if ((arg < 2) || (arg > 8192)) return -EINVAL; /* * We don't really want Joe User generating more * than 64Hz of interrupts on a multi-user machine. */ if (!kernel && (arg > rtc_max_user_freq) && !capable(CAP_SYS_RESOURCE)) return -EACCES; while (arg > (1<<tmp)) tmp++; /* * Check that the input was really a power of 2. 
*/ if (arg != (1<<tmp)) return -EINVAL; rtc_freq = arg; spin_lock_irqsave(&rtc_lock, flags); if (hpet_set_periodic_freq(arg)) { spin_unlock_irqrestore(&rtc_lock, flags); return 0; } val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0; val |= (16 - tmp); CMOS_WRITE(val, RTC_FREQ_SELECT); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } #endif case RTC_EPOCH_READ: /* Read the epoch. */ { return put_user(epoch, (unsigned long __user *)arg); } case RTC_EPOCH_SET: /* Set the epoch. */ { /* * There were no RTC clocks before 1900. */ if (arg < 1900) return -EINVAL; if (!capable(CAP_SYS_TIME)) return -EACCES; epoch = arg; return 0; } default: return -ENOTTY; } return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0; } static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; ret = rtc_do_ioctl(cmd, arg, 0); return ret; } /* * We enforce only one user at a time here with the open/close. * Also clear the previous interrupt data on an open, and clean * up things on a close. */ static int rtc_open(struct inode *inode, struct file *file) { spin_lock_irq(&rtc_lock); if (rtc_status & RTC_IS_OPEN) goto out_busy; rtc_status |= RTC_IS_OPEN; rtc_irq_data = 0; spin_unlock_irq(&rtc_lock); return 0; out_busy: spin_unlock_irq(&rtc_lock); return -EBUSY; } static int rtc_fasync(int fd, struct file *filp, int on) { return fasync_helper(fd, filp, on, &rtc_async_queue); } static int rtc_release(struct inode *inode, struct file *file) { #ifdef RTC_IRQ unsigned char tmp; if (rtc_has_irq == 0) goto no_irq; /* * Turn off all interrupts once the device is no longer * in use, and clear the data. 
*/ spin_lock_irq(&rtc_lock); if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { tmp = CMOS_READ(RTC_CONTROL); tmp &= ~RTC_PIE; tmp &= ~RTC_AIE; tmp &= ~RTC_UIE; CMOS_WRITE(tmp, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); } if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } spin_unlock_irq(&rtc_lock); no_irq: #endif spin_lock_irq(&rtc_lock); rtc_irq_data = 0; rtc_status &= ~RTC_IS_OPEN; spin_unlock_irq(&rtc_lock); return 0; } #ifdef RTC_IRQ static unsigned int rtc_poll(struct file *file, poll_table *wait) { unsigned long l; if (rtc_has_irq == 0) return 0; poll_wait(file, &rtc_wait, wait); spin_lock_irq(&rtc_lock); l = rtc_irq_data; spin_unlock_irq(&rtc_lock); if (l != 0) return POLLIN | POLLRDNORM; return 0; } #endif int rtc_register(rtc_task_t *task) { #ifndef RTC_IRQ return -EIO; #else if (task == NULL || task->func == NULL) return -EINVAL; spin_lock_irq(&rtc_lock); if (rtc_status & RTC_IS_OPEN) { spin_unlock_irq(&rtc_lock); return -EBUSY; } spin_lock(&rtc_task_lock); if (rtc_callback) { spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return -EBUSY; } rtc_status |= RTC_IS_OPEN; rtc_callback = task; spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return 0; #endif } EXPORT_SYMBOL(rtc_register); int rtc_unregister(rtc_task_t *task) { #ifndef RTC_IRQ return -EIO; #else unsigned char tmp; spin_lock_irq(&rtc_lock); spin_lock(&rtc_task_lock); if (rtc_callback != task) { spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return -ENXIO; } rtc_callback = NULL; /* disable controls */ if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { tmp = CMOS_READ(RTC_CONTROL); tmp &= ~RTC_PIE; tmp &= ~RTC_AIE; tmp &= ~RTC_UIE; CMOS_WRITE(tmp, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); } if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } rtc_status &= ~RTC_IS_OPEN; spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return 0; #endif } EXPORT_SYMBOL(rtc_unregister); int 
rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg) { #ifndef RTC_IRQ return -EIO; #else unsigned long flags; if (cmd != RTC_PIE_ON && cmd != RTC_PIE_OFF && cmd != RTC_IRQP_SET) return -EINVAL; spin_lock_irqsave(&rtc_task_lock, flags); if (rtc_callback != task) { spin_unlock_irqrestore(&rtc_task_lock, flags); return -ENXIO; } spin_unlock_irqrestore(&rtc_task_lock, flags); return rtc_do_ioctl(cmd, arg, 1); #endif } EXPORT_SYMBOL(rtc_control); /* * The various file operations we support. */ static const struct file_operations rtc_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = rtc_read, #ifdef RTC_IRQ .poll = rtc_poll, #endif .unlocked_ioctl = rtc_ioctl, .open = rtc_open, .release = rtc_release, .fasync = rtc_fasync, }; static struct miscdevice rtc_dev = { .minor = RTC_MINOR, .name = "rtc", .fops = &rtc_fops, }; #ifdef CONFIG_PROC_FS static const struct file_operations rtc_proc_fops = { .owner = THIS_MODULE, .open = rtc_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif static resource_size_t rtc_size; static struct resource * __init rtc_request_region(resource_size_t size) { struct resource *r; if (RTC_IOMAPPED) r = request_region(RTC_PORT(0), size, "rtc"); else r = request_mem_region(RTC_PORT(0), size, "rtc"); if (r) rtc_size = size; return r; } static void rtc_release_region(void) { if (RTC_IOMAPPED) release_region(RTC_PORT(0), rtc_size); else release_mem_region(RTC_PORT(0), rtc_size); } static int __init rtc_init(void) { #ifdef CONFIG_PROC_FS struct proc_dir_entry *ent; #endif #if defined(__alpha__) || defined(__mips__) unsigned int year, ctrl; char *guess = NULL; #endif #ifdef CONFIG_SPARC32 struct device_node *ebus_dp; struct of_device *op; #else void *r; #ifdef RTC_IRQ irq_handler_t rtc_int_handler_ptr; #endif #endif #ifdef CONFIG_SPARC32 for_each_node_by_name(ebus_dp, "ebus") { struct device_node *dp; for (dp = ebus_dp; dp; dp = dp->sibling) { if (!strcmp(dp->name, "rtc")) { op = 
of_find_device_by_node(dp); if (op) { rtc_port = op->resource[0].start; rtc_irq = op->irqs[0]; goto found; } } } } rtc_has_irq = 0; printk(KERN_ERR "rtc_init: no PC rtc found\n"); return -EIO; found: if (!rtc_irq) { rtc_has_irq = 0; goto no_irq; } /* * XXX Interrupt pin #7 in Espresso is shared between RTC and * PCI Slot 2 INTA# (and some INTx# in Slot 1). */ if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) { rtc_has_irq = 0; printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq); return -EIO; } no_irq: #else r = rtc_request_region(RTC_IO_EXTENT); /* * If we've already requested a smaller range (for example, because * PNPBIOS or ACPI told us how the device is configured), the request * above might fail because it's too big. * * If so, request just the range we actually use. */ if (!r) r = rtc_request_region(RTC_IO_EXTENT_USED); if (!r) { #ifdef RTC_IRQ rtc_has_irq = 0; #endif printk(KERN_ERR "rtc: I/O resource %lx is not free.\n", (long)(RTC_PORT(0))); return -EIO; } #ifdef RTC_IRQ if (is_hpet_enabled()) { int err; rtc_int_handler_ptr = hpet_rtc_interrupt; err = hpet_register_irq_handler(rtc_interrupt); if (err != 0) { printk(KERN_WARNING "hpet_register_irq_handler failed " "in rtc_init()."); return err; } } else { rtc_int_handler_ptr = rtc_interrupt; } if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) { /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ rtc_has_irq = 0; printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); rtc_release_region(); return -EIO; } hpet_rtc_timer_init(); #endif #endif /* CONFIG_SPARC32 vs. 
others */ if (misc_register(&rtc_dev)) { #ifdef RTC_IRQ free_irq(RTC_IRQ, NULL); hpet_unregister_irq_handler(rtc_interrupt); rtc_has_irq = 0; #endif rtc_release_region(); return -ENODEV; } #ifdef CONFIG_PROC_FS ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops); if (!ent) printk(KERN_WARNING "rtc: Failed to register with procfs.\n"); #endif #if defined(__alpha__) || defined(__mips__) rtc_freq = HZ; /* Each operating system on an Alpha uses its own epoch. Let's try to guess which one we are using now. */ if (rtc_is_updating() != 0) msleep(20); spin_lock_irq(&rtc_lock); year = CMOS_READ(RTC_YEAR); ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irq(&rtc_lock); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) year = bcd2bin(year); /* This should never happen... */ if (year < 20) { epoch = 2000; guess = "SRM (post-2000)"; } else if (year >= 20 && year < 48) { epoch = 1980; guess = "ARC console"; } else if (year >= 48 && year < 72) { epoch = 1952; guess = "Digital UNIX"; #if defined(__mips__) } else if (year >= 72 && year < 74) { epoch = 2000; guess = "Digital DECstation"; #else } else if (year >= 70) { epoch = 1900; guess = "Standard PC (1900)"; #endif } if (guess) printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch); #endif #ifdef RTC_IRQ if (rtc_has_irq == 0) goto no_irq2; spin_lock_irq(&rtc_lock); rtc_freq = 1024; if (!hpet_set_periodic_freq(rtc_freq)) { /* * Initialize periodic frequency to CMOS reset default, * which is 1024Hz */ CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT); } spin_unlock_irq(&rtc_lock); no_irq2: #endif (void) init_sysctl(); printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n"); return 0; } static void __exit rtc_exit(void) { cleanup_sysctl(); remove_proc_entry("driver/rtc", NULL); misc_deregister(&rtc_dev); #ifdef CONFIG_SPARC32 if (rtc_has_irq) free_irq(rtc_irq, &rtc_port); #else rtc_release_region(); #ifdef RTC_IRQ if (rtc_has_irq) { free_irq(RTC_IRQ, NULL); 
hpet_unregister_irq_handler(hpet_rtc_interrupt); } #endif #endif /* CONFIG_SPARC32 */ } module_init(rtc_init); module_exit(rtc_exit); #ifdef RTC_IRQ /* * At IRQ rates >= 4096Hz, an interrupt may get lost altogether. * (usually during an IDE disk interrupt, with IRQ unmasking off) * Since the interrupt handler doesn't get called, the IRQ status * byte doesn't get read, and the RTC stops generating interrupts. * A timer is set, and will call this function if/when that happens. * To get it out of this stalled state, we just read the status. * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost. * (You *really* shouldn't be trying to use a non-realtime system * for something that requires a steady > 1KHz signal anyways.) */ static void rtc_dropped_irq(unsigned long data) { unsigned long freq; spin_lock_irq(&rtc_lock); if (hpet_rtc_dropped_irq()) { spin_unlock_irq(&rtc_lock); return; } /* Just in case someone disabled the timer from behind our back... */ if (rtc_status & RTC_TIMER_ON) mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); rtc_irq_data += ((rtc_freq/HZ)<<8); rtc_irq_data &= ~0xff; rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */ freq = rtc_freq; spin_unlock_irq(&rtc_lock); if (printk_ratelimit()) { printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq); } /* Now we have new data */ wake_up_interruptible(&rtc_wait); kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); } #endif #ifdef CONFIG_PROC_FS /* * Info exported via "/proc/driver/rtc". */ static int rtc_proc_show(struct seq_file *seq, void *v) { #define YN(bit) ((ctrl & bit) ? "yes" : "no") #define NY(bit) ((ctrl & bit) ? 
"no" : "yes") struct rtc_time tm; unsigned char batt, ctrl; unsigned long freq; spin_lock_irq(&rtc_lock); batt = CMOS_READ(RTC_VALID) & RTC_VRT; ctrl = CMOS_READ(RTC_CONTROL); freq = rtc_freq; spin_unlock_irq(&rtc_lock); rtc_get_rtc_time(&tm); /* * There is no way to tell if the luser has the RTC set for local * time or for Universal Standard Time (GMT). Probably local though. */ seq_printf(seq, "rtc_time\t: %02d:%02d:%02d\n" "rtc_date\t: %04d-%02d-%02d\n" "rtc_epoch\t: %04lu\n", tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch); get_rtc_alm_time(&tm); /* * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will * match any value for that particular field. Values that are * greater than a valid time, but less than 0xc0 shouldn't appear. */ seq_puts(seq, "alarm\t\t: "); if (tm.tm_hour <= 24) seq_printf(seq, "%02d:", tm.tm_hour); else seq_puts(seq, "**:"); if (tm.tm_min <= 59) seq_printf(seq, "%02d:", tm.tm_min); else seq_puts(seq, "**:"); if (tm.tm_sec <= 59) seq_printf(seq, "%02d\n", tm.tm_sec); else seq_puts(seq, "**\n"); seq_printf(seq, "DST_enable\t: %s\n" "BCD\t\t: %s\n" "24hr\t\t: %s\n" "square_wave\t: %s\n" "alarm_IRQ\t: %s\n" "update_IRQ\t: %s\n" "periodic_IRQ\t: %s\n" "periodic_freq\t: %ld\n" "batt_status\t: %s\n", YN(RTC_DST_EN), NY(RTC_DM_BINARY), YN(RTC_24H), YN(RTC_SQWE), YN(RTC_AIE), YN(RTC_UIE), YN(RTC_PIE), freq, batt ? "okay" : "dead"); return 0; #undef YN #undef NY } static int rtc_proc_open(struct inode *inode, struct file *file) { return single_open(file, rtc_proc_show, NULL); } #endif static void rtc_get_rtc_time(struct rtc_time *rtc_tm) { unsigned long uip_watchdog = jiffies, flags; unsigned char ctrl; #ifdef CONFIG_MACH_DECSTATION unsigned int real_year; #endif /* * read RTC once any update in progress is done. The update * can take just over 2ms. We wait 20ms. There is no need to * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. 
* If you need to know *exactly* when a second has started, enable * periodic update complete interrupts, (via ioctl) and then * immediately read /dev/rtc which will block until you get the IRQ. * Once the read clears, read the RTC time (again via ioctl). Easy. */ while (rtc_is_updating() != 0 && time_before(jiffies, uip_watchdog + 2*HZ/100)) cpu_relax(); /* * Only the values that we read from the RTC are set. We leave * tm_wday, tm_yday and tm_isdst untouched. Note that while the * RTC has RTC_DAY_OF_WEEK, we should usually ignore it, as it is * only updated by the RTC when initially set to a non-zero value. */ spin_lock_irqsave(&rtc_lock, flags); rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS); rtc_tm->tm_min = CMOS_READ(RTC_MINUTES); rtc_tm->tm_hour = CMOS_READ(RTC_HOURS); rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); rtc_tm->tm_mon = CMOS_READ(RTC_MONTH); rtc_tm->tm_year = CMOS_READ(RTC_YEAR); /* Only set from 2.6.16 onwards */ rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK); #ifdef CONFIG_MACH_DECSTATION real_year = CMOS_READ(RTC_DEC_YEAR); #endif ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irqrestore(&rtc_lock, flags); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday); } #ifdef CONFIG_MACH_DECSTATION rtc_tm->tm_year += real_year - 72; #endif /* * Account for differences between how the RTC uses the values * and how they are defined in a struct rtc_time; */ rtc_tm->tm_year += epoch - 1900; if (rtc_tm->tm_year <= 69) rtc_tm->tm_year += 100; rtc_tm->tm_mon--; } static void get_rtc_alm_time(struct rtc_time *alm_tm) { unsigned char ctrl; /* * Only the values that we read from the RTC are set. That * means only tm_hour, tm_min, and tm_sec. 
*/
	spin_lock_irq(&rtc_lock);
	alm_tm->tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
	alm_tm->tm_min = CMOS_READ(RTC_MINUTES_ALARM);
	alm_tm->tm_hour = CMOS_READ(RTC_HOURS_ALARM);
	ctrl = CMOS_READ(RTC_CONTROL);
	spin_unlock_irq(&rtc_lock);

	/* Registers hold BCD unless the chip is in binary mode. */
	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
		alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
		alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
		alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
	}
}

#ifdef RTC_IRQ
/*
 * Used to disable/enable interrupts for any one of UIE, AIE, PIE.
 * Rumour has it that if you frob the interrupt enable/disable
 * bits in RTC_CONTROL, you should read RTC_INTR_FLAGS, to
 * ensure you actually start getting interrupts. Probably for
 * compatibility with older/broken chipset RTC implementations.
 * We also clear out any old irq data after an ioctl() that
 * meddles with the interrupt enable/disable bits.
 */

/* Caller must hold rtc_lock (see set_rtc_irq_bit / RTC_PIE_OFF paths). */
static void mask_rtc_irq_bit_locked(unsigned char bit)
{
	unsigned char val;

	if (hpet_mask_rtc_irq_bit(bit))
		return;
	val = CMOS_READ(RTC_CONTROL);
	val &= ~bit;
	CMOS_WRITE(val, RTC_CONTROL);
	CMOS_READ(RTC_INTR_FLAGS);	/* see comment above */

	rtc_irq_data = 0;
}

/* Caller must hold rtc_lock (see set_rtc_irq_bit / RTC_PIE_ON paths). */
static void set_rtc_irq_bit_locked(unsigned char bit)
{
	unsigned char val;

	if (hpet_set_rtc_irq_bit(bit))
		return;
	val = CMOS_READ(RTC_CONTROL);
	val |= bit;
	CMOS_WRITE(val, RTC_CONTROL);
	CMOS_READ(RTC_INTR_FLAGS);	/* see comment above */

	rtc_irq_data = 0;
}
#endif

MODULE_AUTHOR("Paul Gortmaker");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(RTC_MINOR);
gpl-2.0
halaszk/halaszk-universal5430
drivers/gpu/drm/drm_proc.c
2135
5849
/** * \file drm_proc.c * /proc support for DRM * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> * * \par Acknowledgements: * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix * the problem with the proc files not outputting all their information. */ /* * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/export.h> #include <drm/drmP.h> /*************************************************** * Initialization, etc. **************************************************/ /** * Proc file list. 
*/ static const struct drm_info_list drm_proc_list[] = { {"name", drm_name_info, 0}, {"vm", drm_vm_info, 0}, {"clients", drm_clients_info, 0}, {"bufs", drm_bufs_info, 0}, {"gem_names", drm_gem_name_info, DRIVER_GEM}, #if DRM_DEBUG_CODE {"vma", drm_vma_info, 0}, #endif }; #define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list) static int drm_proc_open(struct inode *inode, struct file *file) { struct drm_info_node* node = PDE_DATA(inode); return single_open(file, node->info_ent->show, node); } static const struct file_operations drm_proc_fops = { .owner = THIS_MODULE, .open = drm_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /** * Initialize a given set of proc files for a device * * \param files The array of files to create * \param count The number of files given * \param root DRI proc dir entry. * \param minor device minor number * \return Zero on success, non-zero on failure * * Create a given set of proc files represented by an array of * gdm_proc_lists in the given root directory. */ static int drm_proc_create_files(const struct drm_info_list *files, int count, struct proc_dir_entry *root, struct drm_minor *minor) { struct drm_device *dev = minor->dev; struct proc_dir_entry *ent; struct drm_info_node *tmp; int i; for (i = 0; i < count; i++) { u32 features = files[i].driver_features; if (features != 0 && (dev->driver->driver_features & features) != features) continue; tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); if (!tmp) return -1; tmp->minor = minor; tmp->info_ent = &files[i]; list_add(&tmp->list, &minor->proc_nodes.list); ent = proc_create_data(files[i].name, S_IRUGO, root, &drm_proc_fops, tmp); if (!ent) { DRM_ERROR("Cannot create /proc/dri/%u/%s\n", minor->index, files[i].name); list_del(&tmp->list); kfree(tmp); return -1; } } return 0; } /** * Initialize the DRI proc filesystem for a device * * \param dev DRM device * \param root DRI proc dir entry. * \param dev_root resulting DRI device proc dir entry. 
* \return root entry pointer on success, or NULL on failure. * * Create the DRI proc root entry "/proc/dri", the device proc root entry * "/proc/dri/%minor%/", and each entry in proc_list as * "/proc/dri/%minor%/%name%". */ int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root) { char name[12]; int ret; INIT_LIST_HEAD(&minor->proc_nodes.list); sprintf(name, "%u", minor->index); minor->proc_root = proc_mkdir(name, root); if (!minor->proc_root) { DRM_ERROR("Cannot create /proc/dri/%s\n", name); return -1; } ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES, minor->proc_root, minor); if (ret) { remove_proc_subtree(name, root); minor->proc_root = NULL; DRM_ERROR("Failed to create core drm proc files\n"); return ret; } return 0; } static int drm_proc_remove_files(const struct drm_info_list *files, int count, struct drm_minor *minor) { struct list_head *pos, *q; struct drm_info_node *tmp; int i; for (i = 0; i < count; i++) { list_for_each_safe(pos, q, &minor->proc_nodes.list) { tmp = list_entry(pos, struct drm_info_node, list); if (tmp->info_ent == &files[i]) { remove_proc_entry(files[i].name, minor->proc_root); list_del(pos); kfree(tmp); } } } return 0; } /** * Cleanup the proc filesystem resources. * * \param minor device minor number. * \param root DRI proc dir entry. * \param dev_root DRI device proc dir entry. * \return always zero. * * Remove all proc entries created by proc_init(). */ int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) { char name[64]; if (!root || !minor->proc_root) return 0; drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor); sprintf(name, "%d", minor->index); remove_proc_subtree(name, root); return 0; }
gpl-2.0
javilonas/Lonas_KL-SM-G901F
drivers/video/exynos/exynos_dp_reg.c
2391
32137
/* * Samsung DP (Display port) register interface driver. * * Copyright (C) 2012 Samsung Electronics Co., Ltd. * Author: Jingoo Han <jg1.han@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/device.h> #include <linux/io.h> #include <linux/delay.h> #include <video/exynos_dp.h> #include "exynos_dp_core.h" #include "exynos_dp_reg.h" #define COMMON_INT_MASK_1 0 #define COMMON_INT_MASK_2 0 #define COMMON_INT_MASK_3 0 #define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG) #define INT_STA_MASK INT_HPD void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); reg |= HDCP_VIDEO_MUTE; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); } else { reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); reg &= ~HDCP_VIDEO_MUTE; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); } } void exynos_dp_stop_video(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); reg &= ~VIDEO_EN; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); } void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 | LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3; else reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 | LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0; writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP); } void exynos_dp_init_analog_param(struct exynos_dp_device *dp) { u32 reg; reg = TX_TERMINAL_CTRL_50_OHM; writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1); reg = SEL_24M | TX_DVDD_BIT_1_0625V; writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2); reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO; writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3); reg = 
PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | TX_CUR1_2X | TX_CUR_16_MA; writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1); reg = CH3_AMP_400_MV | CH2_AMP_400_MV | CH1_AMP_400_MV | CH0_AMP_400_MV; writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL); } void exynos_dp_init_interrupt(struct exynos_dp_device *dp) { /* Set interrupt pin assertion polarity as high */ writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL); /* Clear pending regisers */ writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); writel(0x4f, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_2); writel(0xe0, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_3); writel(0xe7, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); writel(0x63, dp->reg_base + EXYNOS_DP_INT_STA); /* 0:mask,1: unmask */ writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1); writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2); writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3); writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4); writel(0x00, dp->reg_base + EXYNOS_DP_INT_STA_MASK); } void exynos_dp_reset(struct exynos_dp_device *dp) { u32 reg; exynos_dp_stop_video(dp); exynos_dp_enable_video_mute(dp, 0); reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N | AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N | HDCP_FUNC_EN_N | SW_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N | SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); usleep_range(20, 30); exynos_dp_lane_swap(dp, 0); writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_1); writel(0x40, dp->reg_base + EXYNOS_DP_SYS_CTL_2); writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_3); writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_4); writel(0x0, dp->reg_base + EXYNOS_DP_PKT_SEND_CTL); writel(0x0, dp->reg_base + EXYNOS_DP_HDCP_CTL); writel(0x5e, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_L); writel(0x1a, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_H); writel(0x10, dp->reg_base + EXYNOS_DP_LINK_DEBUG_CTL); writel(0x0, 
dp->reg_base + EXYNOS_DP_PHY_TEST); writel(0x0, dp->reg_base + EXYNOS_DP_VIDEO_FIFO_THRD); writel(0x20, dp->reg_base + EXYNOS_DP_AUDIO_MARGIN); writel(0x4, dp->reg_base + EXYNOS_DP_M_VID_GEN_FILTER_TH); writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH); writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); } void exynos_dp_swreset(struct exynos_dp_device *dp) { writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET); } void exynos_dp_config_interrupt(struct exynos_dp_device *dp) { u32 reg; /* 0: mask, 1: unmask */ reg = COMMON_INT_MASK_1; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1); reg = COMMON_INT_MASK_2; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2); reg = COMMON_INT_MASK_3; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3); reg = COMMON_INT_MASK_4; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4); reg = INT_STA_MASK; writel(reg, dp->reg_base + EXYNOS_DP_INT_STA_MASK); } enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL); if (reg & PLL_LOCK) return PLL_LOCKED; else return PLL_UNLOCKED; } void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL); reg |= DP_PLL_PD; writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL); } else { reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL); reg &= ~DP_PLL_PD; writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL); } } void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp, enum analog_power_block block, bool enable) { u32 reg; switch (block) { case AUX_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= AUX_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~AUX_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case CH0_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= CH0_PD; writel(reg, dp->reg_base + 
EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~CH0_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case CH1_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= CH1_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~CH1_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case CH2_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= CH2_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~CH2_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case CH3_BLOCK: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= CH3_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~CH3_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case ANALOG_TOTAL: if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg |= DP_PHY_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); reg &= ~DP_PHY_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } break; case POWER_ALL: if (enable) { reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD | CH1_PD | CH0_PD; writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); } else { writel(0x00, dp->reg_base + EXYNOS_DP_PHY_PD); } break; default: break; } } void exynos_dp_init_analog_func(struct exynos_dp_device *dp) { u32 reg; int timeout_loop = 0; exynos_dp_set_analog_power_down(dp, POWER_ALL, 0); reg = PLL_LOCK_CHG; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL); reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL); writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL); /* Power up PLL */ if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { exynos_dp_set_pll_power_down(dp, 0); while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { timeout_loop++; if 
(DP_TIMEOUT_LOOP_COUNT < timeout_loop) { dev_err(dp->dev, "failed to get pll lock status\n"); return; } usleep_range(10, 20); } } /* Enable Serdes FIFO function and Link symbol clock domain module */ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N | AUX_FUNC_EN_N); writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); } void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp) { u32 reg; reg = HOTPLUG_CHG | HPD_LOST | PLUG; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); reg = INT_HPD; writel(reg, dp->reg_base + EXYNOS_DP_INT_STA); } void exynos_dp_init_hpd(struct exynos_dp_device *dp) { u32 reg; exynos_dp_clear_hotplug_interrupts(dp); reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); reg &= ~(F_HPD | HPD_CTRL); writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); } enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp) { u32 reg; /* Parse hotplug interrupt status register */ reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); if (reg & PLUG) return DP_IRQ_TYPE_HP_CABLE_IN; if (reg & HPD_LOST) return DP_IRQ_TYPE_HP_CABLE_OUT; if (reg & HOTPLUG_CHG) return DP_IRQ_TYPE_HP_CHANGE; return DP_IRQ_TYPE_UNKNOWN; } void exynos_dp_reset_aux(struct exynos_dp_device *dp) { u32 reg; /* Disable AUX channel module */ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); reg |= AUX_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); } void exynos_dp_init_aux(struct exynos_dp_device *dp) { u32 reg; /* Clear inerrupts related to AUX channel */ reg = RPLY_RECEIV | AUX_ERR; writel(reg, dp->reg_base + EXYNOS_DP_INT_STA); exynos_dp_reset_aux(dp); /* Disable AUX transaction H/W retry */ reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0)| AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL) ; /* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */ reg = DEFER_CTRL_EN | DEFER_COUNT(1); writel(reg, dp->reg_base + 
EXYNOS_DP_AUX_CH_DEFER_CTL); /* Enable AUX channel module */ reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); reg &= ~AUX_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); } int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); if (reg & HPD_STATUS) return 0; return -EINVAL; } void exynos_dp_enable_sw_function(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1); reg &= ~SW_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); } int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp) { int reg; int retval = 0; int timeout_loop = 0; /* Enable AUX CH operation */ reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); reg |= AUX_EN; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); /* Is AUX CH command reply received? */ reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); while (!(reg & RPLY_RECEIV)) { timeout_loop++; if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { dev_err(dp->dev, "AUX CH command reply failed!\n"); return -ETIMEDOUT; } reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); usleep_range(10, 11); } /* Clear interrupt source for AUX CH command reply */ writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA); /* Clear interrupt source for AUX CH access error */ reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); if (reg & AUX_ERR) { writel(AUX_ERR, dp->reg_base + EXYNOS_DP_INT_STA); return -EREMOTEIO; } /* Check AUX CH error access status */ reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_STA); if ((reg & AUX_STATUS_MASK) != 0) { dev_err(dp->dev, "AUX CH error happens: %d\n\n", reg & AUX_STATUS_MASK); return -EREMOTEIO; } return retval; } int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp, unsigned int reg_addr, unsigned char data) { u32 reg; int i; int retval; for (i = 0; i < 3; i++) { /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); /* Select DPCD device address */ reg = AUX_ADDR_7_0(reg_addr); 
writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); reg = AUX_ADDR_15_8(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); reg = AUX_ADDR_19_16(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); /* Write data buffer */ reg = (unsigned int)data; writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0); /* * Set DisplayPort transaction and write 1 byte * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. */ reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } return retval; } int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp, unsigned int reg_addr, unsigned char *data) { u32 reg; int i; int retval; for (i = 0; i < 3; i++) { /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); /* Select DPCD device address */ reg = AUX_ADDR_7_0(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); reg = AUX_ADDR_15_8(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); reg = AUX_ADDR_19_16(reg_addr); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); /* * Set DisplayPort transaction and read 1 byte * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. 
*/ reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } /* Read data buffer */ reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0); *data = (unsigned char)(reg & 0xff); return retval; } int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp, unsigned int reg_addr, unsigned int count, unsigned char data[]) { u32 reg; unsigned int start_offset; unsigned int cur_data_count; unsigned int cur_data_idx; int i; int retval = 0; /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); start_offset = 0; while (start_offset < count) { /* Buffer size of AUX CH is 16 * 4bytes */ if ((count - start_offset) > 16) cur_data_count = 16; else cur_data_count = count - start_offset; for (i = 0; i < 3; i++) { /* Select DPCD device address */ reg = AUX_ADDR_7_0(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); reg = AUX_ADDR_15_8(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); reg = AUX_ADDR_19_16(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); for (cur_data_idx = 0; cur_data_idx < cur_data_count; cur_data_idx++) { reg = data[start_offset + cur_data_idx]; writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0 + 4 * cur_data_idx); } /* * Set DisplayPort transaction and write * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. 
*/ reg = AUX_LENGTH(cur_data_count) | AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } start_offset += cur_data_count; } return retval; } int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp, unsigned int reg_addr, unsigned int count, unsigned char data[]) { u32 reg; unsigned int start_offset; unsigned int cur_data_count; unsigned int cur_data_idx; int i; int retval = 0; /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); start_offset = 0; while (start_offset < count) { /* Buffer size of AUX CH is 16 * 4bytes */ if ((count - start_offset) > 16) cur_data_count = 16; else cur_data_count = count - start_offset; /* AUX CH Request Transaction process */ for (i = 0; i < 3; i++) { /* Select DPCD device address */ reg = AUX_ADDR_7_0(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); reg = AUX_ADDR_15_8(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); reg = AUX_ADDR_19_16(reg_addr + start_offset); writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); /* * Set DisplayPort transaction and read * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. 
*/ reg = AUX_LENGTH(cur_data_count) | AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } for (cur_data_idx = 0; cur_data_idx < cur_data_count; cur_data_idx++) { reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0 + 4 * cur_data_idx); data[start_offset + cur_data_idx] = (unsigned char)reg; } start_offset += cur_data_count; } return retval; } int exynos_dp_select_i2c_device(struct exynos_dp_device *dp, unsigned int device_addr, unsigned int reg_addr) { u32 reg; int retval; /* Set EDID device address */ reg = device_addr; writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); /* Set offset from base address of EDID device */ writel(reg_addr, dp->reg_base + EXYNOS_DP_BUF_DATA_0); /* * Set I2C transaction and write address * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. */ reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT | AUX_TX_COMM_WRITE; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval != 0) dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); return retval; } int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp, unsigned int device_addr, unsigned int reg_addr, unsigned int *data) { u32 reg; int i; int retval; for (i = 0; i < 3; i++) { /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); /* Select EDID device */ retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr); if (retval != 0) continue; /* * Set I2C transaction and read data * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. 
*/ reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_READ; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } /* Read data */ if (retval == 0) *data = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0); return retval; } int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp, unsigned int device_addr, unsigned int reg_addr, unsigned int count, unsigned char edid[]) { u32 reg; unsigned int i, j; unsigned int cur_data_idx; unsigned int defer = 0; int retval = 0; for (i = 0; i < count; i += 16) { for (j = 0; j < 3; j++) { /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); /* Set normal AUX CH command */ reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); reg &= ~ADDR_ONLY; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); /* * If Rx sends defer, Tx sends only reads * request without sending address */ if (!defer) retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr + i); else defer = 0; if (retval == 0) { /* * Set I2C transaction and write data * If bit 3 is 1, DisplayPort transaction. * If Bit 3 is 0, I2C transaction. 
*/ reg = AUX_LENGTH(16) | AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_READ; writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); /* Start AUX transaction */ retval = exynos_dp_start_aux_transaction(dp); if (retval == 0) break; else dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); } /* Check if Rx sends defer */ reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM); if (reg == AUX_RX_COMM_AUX_DEFER || reg == AUX_RX_COMM_I2C_DEFER) { dev_err(dp->dev, "Defer: %d\n\n", reg); defer = 1; } } for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) { reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0 + 4 * cur_data_idx); edid[i + cur_data_idx] = (unsigned char)reg; } } return retval; } void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype) { u32 reg; reg = bwtype; if ((bwtype == LINK_RATE_2_70GBPS) || (bwtype == LINK_RATE_1_62GBPS)) writel(reg, dp->reg_base + EXYNOS_DP_LINK_BW_SET); } void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LINK_BW_SET); *bwtype = reg; } void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count) { u32 reg; reg = count; writel(reg, dp->reg_base + EXYNOS_DP_LANE_COUNT_SET); } void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LANE_COUNT_SET); *count = reg; } void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); reg |= ENHANCED; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); } else { reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); reg &= ~ENHANCED; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); } } void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, enum pattern_set pattern) { u32 reg; switch (pattern) { case PRBS7: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; case D10_2: reg = 
SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; case TRAINING_PTN1: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; case TRAINING_PTN2: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; case DP_NONE: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_DISABLE | SW_TRAINING_PATTERN_SET_NORMAL; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); break; default: break; } } void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); } void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); } void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); } void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); } void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); } void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp, u32 training_lane) { u32 reg; reg 
= training_lane; writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); } void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); } void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); } u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); return reg; } u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); return reg; } u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); return reg; } u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); return reg; } void exynos_dp_reset_macro(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_PHY_TEST); reg |= MACRO_RST; writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); /* 10 us is the minimum reset time. 
*/ usleep_range(10, 20); reg &= ~MACRO_RST; writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); } void exynos_dp_init_video(struct exynos_dp_device *dp) { u32 reg; reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG; writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); reg = 0x0; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1); reg = CHA_CRI(4) | CHA_CTRL; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2); reg = 0x0; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); reg = VID_HRES_TH(2) | VID_VRES_TH(0); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8); } void exynos_dp_set_video_color_format(struct exynos_dp_device *dp) { u32 reg; /* Configure the input color depth, color space, dynamic range */ reg = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) | (dp->video_info->color_depth << IN_BPC_SHIFT) | (dp->video_info->color_space << IN_COLOR_F_SHIFT); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2); /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */ reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3); reg &= ~IN_YC_COEFFI_MASK; if (dp->video_info->ycbcr_coeff) reg |= IN_YC_COEFFI_ITU709; else reg |= IN_YC_COEFFI_ITU601; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_3); } int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1); writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1); reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1); if (!(reg & DET_STA)) { dev_dbg(dp->dev, "Input stream clock not detected.\n"); return -EINVAL; } reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2); writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2); reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2); dev_dbg(dp->dev, "wait SYS_CTL_2.\n"); if (reg & CHA_STA) { dev_dbg(dp->dev, "Input stream clk is changing\n"); return -EINVAL; } return 0; } void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp, enum clock_recovery_m_value_type type, u32 m_value, u32 n_value) { u32 reg; if (type == REGISTER_M) { reg = readl(dp->reg_base 
+ EXYNOS_DP_SYS_CTL_4); reg |= FIX_M_VID; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); reg = m_value & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_M_VID_0); reg = (m_value >> 8) & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_M_VID_1); reg = (m_value >> 16) & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_M_VID_2); reg = n_value & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_N_VID_0); reg = (n_value >> 8) & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_N_VID_1); reg = (n_value >> 16) & 0xff; writel(reg, dp->reg_base + EXYNOS_DP_N_VID_2); } else { reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); reg &= ~FIX_M_VID; writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_0); writel(0x80, dp->reg_base + EXYNOS_DP_N_VID_1); writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_2); } } void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type) { u32 reg; if (type == VIDEO_TIMING_FROM_CAPTURE) { reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg &= ~FORMAT_SEL; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); } else { reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg |= FORMAT_SEL; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); } } void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE; writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); } else { reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MODE_SLAVE_MODE; writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); } } void exynos_dp_start_video(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); reg |= VIDEO_EN; writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); } int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); writel(reg, 
dp->reg_base + EXYNOS_DP_SYS_CTL_3); reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); if (!(reg & STRM_VALID)) { dev_dbg(dp->dev, "Input video stream is not detected.\n"); return -EINVAL; } return 0; } void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1); reg &= ~(MASTER_VID_FUNC_EN_N|SLAVE_VID_FUNC_EN_N); reg |= MASTER_VID_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg &= ~INTERACE_SCAN_CFG; reg |= (dp->video_info->interlaced << 2); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg &= ~VSYNC_POLARITY_CFG; reg |= (dp->video_info->v_sync_polarity << 1); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg &= ~HSYNC_POLARITY_CFG; reg |= (dp->video_info->h_sync_polarity << 0); writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE; writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); } void exynos_dp_enable_scrambling(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); reg &= ~SCRAMBLING_DISABLE; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); } void exynos_dp_disable_scrambling(struct exynos_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); reg |= SCRAMBLING_DISABLE; writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); }
gpl-2.0
antmicro/linux-sunxi
arch/sparc/kernel/sbus.c
4439
20505
/* * sbus.c: UltraSparc SBUS controller support. * * Copyright (C) 1999 David S. Miller (davem@redhat.com) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/page.h> #include <asm/io.h> #include <asm/upa.h> #include <asm/cache.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/prom.h> #include <asm/oplib.h> #include <asm/starfire.h> #include "iommu_common.h" #define MAP_BASE ((u32)0xc0000000) /* Offsets from iommu_regs */ #define SYSIO_IOMMUREG_BASE 0x2400UL #define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */ #define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */ #define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */ #define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */ #define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */ #define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */ #define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */ #define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */ #define IOMMU_DRAM_VALID (1UL << 30UL) /* Offsets from strbuf_regs */ #define SYSIO_STRBUFREG_BASE 0x2800UL #define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */ #define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */ #define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */ #define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */ #define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */ #define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */ #define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */ #define STRBUF_TAG_VALID 0x02UL /* Enable 64-bit DVMA mode for the given device. 
*/ void sbus_set_sbus64(struct device *dev, int bursts) { struct iommu *iommu = dev->archdata.iommu; struct platform_device *op = to_platform_device(dev); const struct linux_prom_registers *regs; unsigned long cfg_reg; int slot; u64 val; regs = of_get_property(op->dev.of_node, "reg", NULL); if (!regs) { printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %s\n", op->dev.of_node->full_name); return; } slot = regs->which_io; cfg_reg = iommu->write_complete_reg; switch (slot) { case 0: cfg_reg += 0x20UL; break; case 1: cfg_reg += 0x28UL; break; case 2: cfg_reg += 0x30UL; break; case 3: cfg_reg += 0x38UL; break; case 13: cfg_reg += 0x40UL; break; case 14: cfg_reg += 0x48UL; break; case 15: cfg_reg += 0x50UL; break; default: return; } val = upa_readq(cfg_reg); if (val & (1UL << 14UL)) { /* Extended transfer mode already enabled. */ return; } val |= (1UL << 14UL); if (bursts & DMA_BURST8) val |= (1UL << 1UL); if (bursts & DMA_BURST16) val |= (1UL << 2UL); if (bursts & DMA_BURST32) val |= (1UL << 3UL); if (bursts & DMA_BURST64) val |= (1UL << 4UL); upa_writeq(val, cfg_reg); } EXPORT_SYMBOL(sbus_set_sbus64); /* INO number to IMAP register offset for SYSIO external IRQ's. * This should conform to both Sunfire/Wildfire server and Fusion * desktop designs. 
*/ #define SYSIO_IMAP_SLOT0 0x2c00UL #define SYSIO_IMAP_SLOT1 0x2c08UL #define SYSIO_IMAP_SLOT2 0x2c10UL #define SYSIO_IMAP_SLOT3 0x2c18UL #define SYSIO_IMAP_SCSI 0x3000UL #define SYSIO_IMAP_ETH 0x3008UL #define SYSIO_IMAP_BPP 0x3010UL #define SYSIO_IMAP_AUDIO 0x3018UL #define SYSIO_IMAP_PFAIL 0x3020UL #define SYSIO_IMAP_KMS 0x3028UL #define SYSIO_IMAP_FLPY 0x3030UL #define SYSIO_IMAP_SHW 0x3038UL #define SYSIO_IMAP_KBD 0x3040UL #define SYSIO_IMAP_MS 0x3048UL #define SYSIO_IMAP_SER 0x3050UL #define SYSIO_IMAP_TIM0 0x3060UL #define SYSIO_IMAP_TIM1 0x3068UL #define SYSIO_IMAP_UE 0x3070UL #define SYSIO_IMAP_CE 0x3078UL #define SYSIO_IMAP_SBERR 0x3080UL #define SYSIO_IMAP_PMGMT 0x3088UL #define SYSIO_IMAP_GFX 0x3090UL #define SYSIO_IMAP_EUPA 0x3098UL #define bogon ((unsigned long) -1) static unsigned long sysio_irq_offsets[] = { /* SBUS Slot 0 --> 3, level 1 --> 7 */ SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, /* Onboard devices (not relevant/used on SunFire). 
*/ SYSIO_IMAP_SCSI, SYSIO_IMAP_ETH, SYSIO_IMAP_BPP, bogon, SYSIO_IMAP_AUDIO, SYSIO_IMAP_PFAIL, bogon, bogon, SYSIO_IMAP_KMS, SYSIO_IMAP_FLPY, SYSIO_IMAP_SHW, SYSIO_IMAP_KBD, SYSIO_IMAP_MS, SYSIO_IMAP_SER, bogon, bogon, SYSIO_IMAP_TIM0, SYSIO_IMAP_TIM1, bogon, bogon, SYSIO_IMAP_UE, SYSIO_IMAP_CE, SYSIO_IMAP_SBERR, SYSIO_IMAP_PMGMT, }; #undef bogon #define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets) /* Convert Interrupt Mapping register pointer to associated * Interrupt Clear register pointer, SYSIO specific version. */ #define SYSIO_ICLR_UNUSED0 0x3400UL #define SYSIO_ICLR_SLOT0 0x3408UL #define SYSIO_ICLR_SLOT1 0x3448UL #define SYSIO_ICLR_SLOT2 0x3488UL #define SYSIO_ICLR_SLOT3 0x34c8UL static unsigned long sysio_imap_to_iclr(unsigned long imap) { unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0; return imap + diff; } static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino) { struct iommu *iommu = op->dev.archdata.iommu; unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; unsigned long imap, iclr; int sbus_level = 0; imap = sysio_irq_offsets[ino]; if (imap == ((unsigned long)-1)) { prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n", ino); prom_halt(); } imap += reg_base; /* SYSIO inconsistency. For external SLOTS, we have to select * the right ICLR register based upon the lower SBUS irq level * bits. */ if (ino >= 0x20) { iclr = sysio_imap_to_iclr(imap); } else { int sbus_slot = (ino & 0x18)>>3; sbus_level = ino & 0x7; switch(sbus_slot) { case 0: iclr = reg_base + SYSIO_ICLR_SLOT0; break; case 1: iclr = reg_base + SYSIO_ICLR_SLOT1; break; case 2: iclr = reg_base + SYSIO_ICLR_SLOT2; break; default: case 3: iclr = reg_base + SYSIO_ICLR_SLOT3; break; } iclr += ((unsigned long)sbus_level - 1UL) * 8UL; } return build_irq(sbus_level, iclr, imap); } /* Error interrupt handling. 
*/ #define SYSIO_UE_AFSR 0x0030UL #define SYSIO_UE_AFAR 0x0038UL #define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */ #define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */ #define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */ #define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */ #define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */ #define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/ #define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */ #define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */ #define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */ #define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */ #define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */ static irqreturn_t sysio_ue_handler(int irq, void *dev_id) { struct platform_device *op = dev_id; struct iommu *iommu = op->dev.archdata.iommu; unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; unsigned long afsr_reg, afar_reg; unsigned long afsr, afar, error_bits; int reported, portid; afsr_reg = reg_base + SYSIO_UE_AFSR; afar_reg = reg_base + SYSIO_UE_AFAR; /* Latch error status. */ afsr = upa_readq(afsr_reg); afar = upa_readq(afar_reg); /* Clear primary/secondary error status bits. */ error_bits = afsr & (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR | SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR); upa_writeq(error_bits, afsr_reg); portid = of_getintprop_default(op->dev.of_node, "portid", -1); /* Log the error. */ printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n", portid, (((error_bits & SYSIO_UEAFSR_PPIO) ? "PIO" : ((error_bits & SYSIO_UEAFSR_PDRD) ? "DVMA Read" : ((error_bits & SYSIO_UEAFSR_PDWR) ? 
"DVMA Write" : "???"))))); printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n", portid, (afsr & SYSIO_UEAFSR_DOFF) >> 45UL, (afsr & SYSIO_UEAFSR_SIZE) >> 42UL, (afsr & SYSIO_UEAFSR_MID) >> 37UL); printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar); printk("SYSIO[%x]: Secondary UE errors [", portid); reported = 0; if (afsr & SYSIO_UEAFSR_SPIO) { reported++; printk("(PIO)"); } if (afsr & SYSIO_UEAFSR_SDRD) { reported++; printk("(DVMA Read)"); } if (afsr & SYSIO_UEAFSR_SDWR) { reported++; printk("(DVMA Write)"); } if (!reported) printk("(none)"); printk("]\n"); return IRQ_HANDLED; } #define SYSIO_CE_AFSR 0x0040UL #define SYSIO_CE_AFAR 0x0048UL #define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */ #define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */ #define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */ #define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */ #define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */ #define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/ #define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */ #define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */ #define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */ #define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */ #define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */ #define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */ static irqreturn_t sysio_ce_handler(int irq, void *dev_id) { struct platform_device *op = dev_id; struct iommu *iommu = op->dev.archdata.iommu; unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; unsigned long afsr_reg, afar_reg; unsigned long afsr, afar, error_bits; int reported, portid; afsr_reg = reg_base + SYSIO_CE_AFSR; afar_reg = reg_base + SYSIO_CE_AFAR; /* Latch error status. 
*/ afsr = upa_readq(afsr_reg); afar = upa_readq(afar_reg); /* Clear primary/secondary error status bits. */ error_bits = afsr & (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR | SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR); upa_writeq(error_bits, afsr_reg); portid = of_getintprop_default(op->dev.of_node, "portid", -1); printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n", portid, (((error_bits & SYSIO_CEAFSR_PPIO) ? "PIO" : ((error_bits & SYSIO_CEAFSR_PDRD) ? "DVMA Read" : ((error_bits & SYSIO_CEAFSR_PDWR) ? "DVMA Write" : "???"))))); /* XXX Use syndrome and afar to print out module string just like * XXX UDB CE trap handler does... -DaveM */ printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n", portid, (afsr & SYSIO_CEAFSR_DOFF) >> 45UL, (afsr & SYSIO_CEAFSR_ESYND) >> 48UL, (afsr & SYSIO_CEAFSR_SIZE) >> 42UL, (afsr & SYSIO_CEAFSR_MID) >> 37UL); printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar); printk("SYSIO[%x]: Secondary CE errors [", portid); reported = 0; if (afsr & SYSIO_CEAFSR_SPIO) { reported++; printk("(PIO)"); } if (afsr & SYSIO_CEAFSR_SDRD) { reported++; printk("(DVMA Read)"); } if (afsr & SYSIO_CEAFSR_SDWR) { reported++; printk("(DVMA Write)"); } if (!reported) printk("(none)"); printk("]\n"); return IRQ_HANDLED; } #define SYSIO_SBUS_AFSR 0x2010UL #define SYSIO_SBUS_AFAR 0x2018UL #define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */ #define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */ #define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */ #define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */ #define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */ #define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */ #define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */ #define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */ #define SYSIO_SBAFSR_RESV2 
0x0000600000000000UL /* Reserved */ #define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */ #define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */ #define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */ static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id) { struct platform_device *op = dev_id; struct iommu *iommu = op->dev.archdata.iommu; unsigned long afsr_reg, afar_reg, reg_base; unsigned long afsr, afar, error_bits; int reported, portid; reg_base = iommu->write_complete_reg - 0x2000UL; afsr_reg = reg_base + SYSIO_SBUS_AFSR; afar_reg = reg_base + SYSIO_SBUS_AFAR; afsr = upa_readq(afsr_reg); afar = upa_readq(afar_reg); /* Clear primary/secondary error status bits. */ error_bits = afsr & (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR | SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR); upa_writeq(error_bits, afsr_reg); portid = of_getintprop_default(op->dev.of_node, "portid", -1); /* Log the error. */ printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n", portid, (((error_bits & SYSIO_SBAFSR_PLE) ? "Late PIO Error" : ((error_bits & SYSIO_SBAFSR_PTO) ? "Time Out" : ((error_bits & SYSIO_SBAFSR_PBERR) ? "Error Ack" : "???")))), (afsr & SYSIO_SBAFSR_RD) ? 
1 : 0); printk("SYSIO[%x]: size[%lx] MID[%lx]\n", portid, (afsr & SYSIO_SBAFSR_SIZE) >> 42UL, (afsr & SYSIO_SBAFSR_MID) >> 37UL); printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar); printk("SYSIO[%x]: Secondary SBUS errors [", portid); reported = 0; if (afsr & SYSIO_SBAFSR_SLE) { reported++; printk("(Late PIO Error)"); } if (afsr & SYSIO_SBAFSR_STO) { reported++; printk("(Time Out)"); } if (afsr & SYSIO_SBAFSR_SBERR) { reported++; printk("(Error Ack)"); } if (!reported) printk("(none)"); printk("]\n"); /* XXX check iommu/strbuf for further error status XXX */ return IRQ_HANDLED; } #define ECC_CONTROL 0x0020UL #define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */ #define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */ #define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */ #define SYSIO_UE_INO 0x34 #define SYSIO_CE_INO 0x35 #define SYSIO_SBUSERR_INO 0x36 static void __init sysio_register_error_handlers(struct platform_device *op) { struct iommu *iommu = op->dev.archdata.iommu; unsigned long reg_base = iommu->write_complete_reg - 0x2000UL; unsigned int irq; u64 control; int portid; portid = of_getintprop_default(op->dev.of_node, "portid", -1); irq = sbus_build_irq(op, SYSIO_UE_INO); if (request_irq(irq, sysio_ue_handler, 0, "SYSIO_UE", op) < 0) { prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n", portid); prom_halt(); } irq = sbus_build_irq(op, SYSIO_CE_INO); if (request_irq(irq, sysio_ce_handler, 0, "SYSIO_CE", op) < 0) { prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n", portid); prom_halt(); } irq = sbus_build_irq(op, SYSIO_SBUSERR_INO); if (request_irq(irq, sysio_sbus_error_handler, 0, "SYSIO_SBERR", op) < 0) { prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n", portid); prom_halt(); } /* Now turn the error interrupts on and also enable ECC checking. 
*/ upa_writeq((SYSIO_ECNTRL_ECCEN | SYSIO_ECNTRL_UEEN | SYSIO_ECNTRL_CEEN), reg_base + ECC_CONTROL); control = upa_readq(iommu->write_complete_reg); control |= 0x100UL; /* SBUS Error Interrupt Enable */ upa_writeq(control, iommu->write_complete_reg); } /* Boot time initialization. */ static void __init sbus_iommu_init(struct platform_device *op) { const struct linux_prom64_registers *pr; struct device_node *dp = op->dev.of_node; struct iommu *iommu; struct strbuf *strbuf; unsigned long regs, reg_base; int i, portid; u64 control; pr = of_get_property(dp, "reg", NULL); if (!pr) { prom_printf("sbus_iommu_init: Cannot map SYSIO " "control registers.\n"); prom_halt(); } regs = pr->phys_addr; iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); if (!iommu || !strbuf) goto fatal_memory_error; op->dev.archdata.iommu = iommu; op->dev.archdata.stc = strbuf; op->dev.archdata.numa_node = -1; reg_base = regs + SYSIO_IOMMUREG_BASE; iommu->iommu_control = reg_base + IOMMU_CONTROL; iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE; iommu->iommu_flush = reg_base + IOMMU_FLUSH; iommu->iommu_tags = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL); reg_base = regs + SYSIO_STRBUFREG_BASE; strbuf->strbuf_control = reg_base + STRBUF_CONTROL; strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH; strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC; strbuf->strbuf_enabled = 1; strbuf->strbuf_flushflag = (volatile unsigned long *) ((((unsigned long)&strbuf->__flushflag_buf[0]) + 63UL) & ~63UL); strbuf->strbuf_flushflag_pa = (unsigned long) __pa(strbuf->strbuf_flushflag); /* The SYSIO SBUS control register is used for dummy reads * in order to ensure write completion. 
*/ iommu->write_complete_reg = regs + 0x2000UL; portid = of_getintprop_default(op->dev.of_node, "portid", -1); printk(KERN_INFO "SYSIO: UPA portID %x, at %016lx\n", portid, regs); /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */ if (iommu_table_init(iommu, IO_TSB_SIZE, MAP_BASE, 0xffffffff, -1)) goto fatal_memory_error; control = upa_readq(iommu->iommu_control); control = ((7UL << 16UL) | (0UL << 2UL) | (1UL << 1UL) | (1UL << 0UL)); upa_writeq(control, iommu->iommu_control); /* Clean out any cruft in the IOMMU using * diagnostic accesses. */ for (i = 0; i < 16; i++) { unsigned long dram, tag; dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL); tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL); dram += (unsigned long)i * 8UL; tag += (unsigned long)i * 8UL; upa_writeq(0, dram); upa_writeq(0, tag); } upa_readq(iommu->write_complete_reg); /* Give the TSB to SYSIO. */ upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase); /* Setup streaming buffer, DE=1 SB_EN=1 */ control = (1UL << 1UL) | (1UL << 0UL); upa_writeq(control, strbuf->strbuf_control); /* Clear out the tags using diagnostics. */ for (i = 0; i < 16; i++) { unsigned long ptag, ltag; ptag = strbuf->strbuf_control + (STRBUF_PTAGDIAG - STRBUF_CONTROL); ltag = strbuf->strbuf_control + (STRBUF_LTAGDIAG - STRBUF_CONTROL); ptag += (unsigned long)i * 8UL; ltag += (unsigned long)i * 8UL; upa_writeq(0UL, ptag); upa_writeq(0UL, ltag); } /* Enable DVMA arbitration for all devices/slots. */ control = upa_readq(iommu->write_complete_reg); control |= 0x3fUL; upa_writeq(control, iommu->write_complete_reg); /* Now some Xfire specific grot... 
*/ if (this_is_starfire) starfire_hookup(portid); sysio_register_error_handlers(op); return; fatal_memory_error: kfree(iommu); kfree(strbuf); prom_printf("sbus_iommu_init: Fatal memory allocation error.\n"); } static int __init sbus_init(void) { struct device_node *dp; for_each_node_by_name(dp, "sbus") { struct platform_device *op = of_find_device_by_node(dp); sbus_iommu_init(op); of_propagate_archdata(op); } return 0; } subsys_initcall(sbus_init);
gpl-2.0
SuperHanss/android_kernel_lge_g3
drivers/net/can/usb/peak_usb/pcan_usb_core.c
4439
22641
/* * CAN driver for PEAK System USB adapters * Derived from the PCAN project file driver/src/pcan_usb_core.c * * Copyright (C) 2003-2010 PEAK System-Technik GmbH * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com> * * Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published * by the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/init.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "pcan_usb_core.h" MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>"); MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters"); MODULE_LICENSE("GPL v2"); /* Table of devices that work with this driver */ static struct usb_device_id peak_usb_table[] = { {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID)}, {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, peak_usb_table); /* List of supported PCAN-USB adapters (NULL terminated list) */ static struct peak_usb_adapter *peak_usb_adapters_list[] = { &pcan_usb, &pcan_usb_pro, NULL, }; /* * dump memory */ #define DUMP_WIDTH 16 void dump_mem(char *prompt, void *p, int l) { pr_info("%s dumping %s (%d bytes):\n", PCAN_USB_DRIVER_NAME, prompt ? 
prompt : "memory", l); print_hex_dump(KERN_INFO, PCAN_USB_DRIVER_NAME " ", DUMP_PREFIX_NONE, DUMP_WIDTH, 1, p, l, false); } /* * initialize a time_ref object with usb adapter own settings */ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, struct peak_usb_adapter *adapter) { if (time_ref) { memset(time_ref, 0, sizeof(struct peak_time_ref)); time_ref->adapter = adapter; } } static void peak_usb_add_us(struct timeval *tv, u32 delta_us) { /* number of s. to add to final time */ u32 delta_s = delta_us / 1000000; delta_us -= delta_s * 1000000; tv->tv_usec += delta_us; if (tv->tv_usec >= 1000000) { tv->tv_usec -= 1000000; delta_s++; } tv->tv_sec += delta_s; } /* * sometimes, another now may be more recent than current one... */ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now) { time_ref->ts_dev_2 = ts_now; /* should wait at least two passes before computing */ if (time_ref->tv_host.tv_sec > 0) { u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1; if (time_ref->ts_dev_2 < time_ref->ts_dev_1) delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1; time_ref->ts_total += delta_ts; } } /* * register device timestamp as now */ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now) { if (time_ref->tv_host_0.tv_sec == 0) { /* use monotonic clock to correctly compute further deltas */ time_ref->tv_host_0 = ktime_to_timeval(ktime_get()); time_ref->tv_host.tv_sec = 0; } else { /* * delta_us should not be >= 2^32 => delta_s should be < 4294 * handle 32-bits wrapping here: if count of s. 
reaches 4200, * reset counters and change time base */ if (time_ref->tv_host.tv_sec != 0) { u32 delta_s = time_ref->tv_host.tv_sec - time_ref->tv_host_0.tv_sec; if (delta_s > 4200) { time_ref->tv_host_0 = time_ref->tv_host; time_ref->ts_total = 0; } } time_ref->tv_host = ktime_to_timeval(ktime_get()); time_ref->tick_count++; } time_ref->ts_dev_1 = time_ref->ts_dev_2; peak_usb_update_ts_now(time_ref, ts_now); } /* * compute timeval according to current ts and time_ref data */ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts, struct timeval *tv) { /* protect from getting timeval before setting now */ if (time_ref->tv_host.tv_sec > 0) { u64 delta_us; delta_us = ts - time_ref->ts_dev_2; if (ts < time_ref->ts_dev_2) delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1; delta_us += time_ref->ts_total; delta_us *= time_ref->adapter->us_per_ts_scale; delta_us >>= time_ref->adapter->us_per_ts_shift; *tv = time_ref->tv_host_0; peak_usb_add_us(tv, (u32)delta_us); } else { *tv = ktime_to_timeval(ktime_get()); } } /* * callback for bulk Rx urb */ static void peak_usb_read_bulk_callback(struct urb *urb) { struct peak_usb_device *dev = urb->context; struct net_device *netdev; int err; netdev = dev->netdev; if (!netif_device_present(netdev)) return; /* check reception status */ switch (urb->status) { case 0: /* success */ break; case -EILSEQ: case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: return; default: if (net_ratelimit()) netdev_err(netdev, "Rx urb aborted (%d)\n", urb->status); goto resubmit_urb; } /* protect from any incoming empty msgs */ if ((urb->actual_length > 0) && (dev->adapter->dev_decode_buf)) { /* handle these kinds of msgs only if _start callback called */ if (dev->state & PCAN_USB_STATE_STARTED) { err = dev->adapter->dev_decode_buf(dev, urb); if (err) dump_mem("received usb message", urb->transfer_buffer, urb->transfer_buffer_length); } } resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep_msg_in), 
urb->transfer_buffer, dev->adapter->rx_buffer_size, peak_usb_read_bulk_callback, dev); usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (!err) return; usb_unanchor_urb(urb); if (err == -ENODEV) netif_device_detach(netdev); else netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", err); } /* * callback for bulk Tx urb */ static void peak_usb_write_bulk_callback(struct urb *urb) { struct peak_tx_urb_context *context = urb->context; struct peak_usb_device *dev; struct net_device *netdev; BUG_ON(!context); dev = context->dev; netdev = dev->netdev; atomic_dec(&dev->active_tx_urbs); if (!netif_device_present(netdev)) return; /* check tx status */ switch (urb->status) { case 0: /* transmission complete */ netdev->stats.tx_packets++; netdev->stats.tx_bytes += context->dlc; /* prevent tx timeout */ netdev->trans_start = jiffies; break; default: if (net_ratelimit()) netdev_err(netdev, "Tx urb aborted (%d)\n", urb->status); case -EPROTO: case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: break; } /* should always release echo skb and corresponding context */ can_get_echo_skb(netdev, context->echo_index); context->echo_index = PCAN_USB_MAX_TX_URBS; /* do wakeup tx queue in case of success only */ if (!urb->status) netif_wake_queue(netdev); } /* * called by netdev to send one skb on the CAN interface. 
*/ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); struct peak_tx_urb_context *context = NULL; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf = (struct can_frame *)skb->data; struct urb *urb; u8 *obuf; int i, err; size_t size = dev->adapter->tx_buffer_size; if (can_dropped_invalid_skb(netdev, skb)) return NETDEV_TX_OK; for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) if (dev->tx_contexts[i].echo_index == PCAN_USB_MAX_TX_URBS) { context = dev->tx_contexts + i; break; } if (!context) { /* should not occur except during restart */ return NETDEV_TX_BUSY; } urb = context->urb; obuf = urb->transfer_buffer; err = dev->adapter->dev_encode_msg(dev, skb, obuf, &size); if (err) { if (net_ratelimit()) netdev_err(netdev, "packet dropped\n"); dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; } context->echo_index = i; context->dlc = cf->can_dlc; usb_anchor_urb(urb, &dev->tx_submitted); can_put_echo_skb(skb, netdev, context->echo_index); atomic_inc(&dev->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { can_free_echo_skb(netdev, context->echo_index); usb_unanchor_urb(urb); /* this context is not used in fact */ context->echo_index = PCAN_USB_MAX_TX_URBS; atomic_dec(&dev->active_tx_urbs); switch (err) { case -ENODEV: netif_device_detach(netdev); break; default: netdev_warn(netdev, "tx urb submitting failed err=%d\n", err); case -ENOENT: /* cable unplugged */ stats->tx_dropped++; } } else { netdev->trans_start = jiffies; /* slow down tx path */ if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS) netif_stop_queue(netdev); } return NETDEV_TX_OK; } /* * start the CAN interface. * Rx and Tx urbs are allocated here. Rx urbs are submitted here. 
*/ static int peak_usb_start(struct peak_usb_device *dev) { struct net_device *netdev = dev->netdev; int err, i; for (i = 0; i < PCAN_USB_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; /* create a URB, and a buffer for it, to receive usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } buf = kmalloc(dev->adapter->rx_buffer_size, GFP_KERNEL); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep_msg_in), buf, dev->adapter->rx_buffer_size, peak_usb_read_bulk_callback, dev); /* ask last usb_free_urb() to also kfree() transfer_buffer */ urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { if (err == -ENODEV) netif_device_detach(dev->netdev); usb_unanchor_urb(urb); kfree(buf); usb_free_urb(urb); break; } /* drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } /* did we submit any URBs? 
Warn if we was not able to submit all urbs */ if (i < PCAN_USB_MAX_RX_URBS) { if (i == 0) { netdev_err(netdev, "couldn't setup any rx URB\n"); return err; } netdev_warn(netdev, "rx performance may be slow\n"); } /* pre-alloc tx buffers and corresponding urbs */ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) { struct peak_tx_urb_context *context; struct urb *urb; u8 *buf; /* create a URB and a buffer for it, to transmit usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { netdev_err(netdev, "No memory left for URBs\n"); err = -ENOMEM; break; } buf = kmalloc(dev->adapter->tx_buffer_size, GFP_KERNEL); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } context = dev->tx_contexts + i; context->dev = dev; context->urb = urb; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->ep_msg_out), buf, dev->adapter->tx_buffer_size, peak_usb_write_bulk_callback, context); /* ask last usb_free_urb() to also kfree() transfer_buffer */ urb->transfer_flags |= URB_FREE_BUFFER; } /* warn if we were not able to allocate enough tx contexts */ if (i < PCAN_USB_MAX_TX_URBS) { if (i == 0) { netdev_err(netdev, "couldn't setup any tx URB\n"); return err; } netdev_warn(netdev, "tx performance may be slow\n"); } if (dev->adapter->dev_start) { err = dev->adapter->dev_start(dev); if (err) goto failed; } dev->state |= PCAN_USB_STATE_STARTED; /* can set bus on now */ if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 1); if (err) goto failed; } dev->can.state = CAN_STATE_ERROR_ACTIVE; return 0; failed: if (err == -ENODEV) netif_device_detach(dev->netdev); netdev_warn(netdev, "couldn't submit control: %d\n", err); return err; } /* * called by netdev to open the corresponding CAN interface. 
*/ static int peak_usb_ndo_open(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); int err; /* common open */ err = open_candev(netdev); if (err) return err; /* finally start device */ err = peak_usb_start(dev); if (err) { netdev_err(netdev, "couldn't start device: %d\n", err); close_candev(netdev); return err; } dev->open_time = jiffies; netif_start_queue(netdev); return 0; } /* * unlink in-flight Rx and Tx urbs and free their memory. */ static void peak_usb_unlink_all_urbs(struct peak_usb_device *dev) { int i; /* free all Rx (submitted) urbs */ usb_kill_anchored_urbs(&dev->rx_submitted); /* free unsubmitted Tx urbs first */ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) { struct urb *urb = dev->tx_contexts[i].urb; if (!urb || dev->tx_contexts[i].echo_index != PCAN_USB_MAX_TX_URBS) { /* * this urb is already released or always submitted, * let usb core free by itself */ continue; } usb_free_urb(urb); dev->tx_contexts[i].urb = NULL; } /* then free all submitted Tx urbs */ usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); } /* * called by netdev to close the corresponding CAN interface. 
*/ static int peak_usb_ndo_stop(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); dev->state &= ~PCAN_USB_STATE_STARTED; netif_stop_queue(netdev); /* unlink all pending urbs and free used memory */ peak_usb_unlink_all_urbs(dev); if (dev->adapter->dev_stop) dev->adapter->dev_stop(dev); close_candev(netdev); dev->open_time = 0; dev->can.state = CAN_STATE_STOPPED; /* can set bus off now */ if (dev->adapter->dev_set_bus) { int err = dev->adapter->dev_set_bus(dev, 0); if (err) return err; } return 0; } /* * handle end of waiting for the device to reset */ void peak_usb_restart_complete(struct peak_usb_device *dev) { /* finally MUST update can state */ dev->can.state = CAN_STATE_ERROR_ACTIVE; /* netdev queue can be awaken now */ netif_wake_queue(dev->netdev); } void peak_usb_async_complete(struct urb *urb) { kfree(urb->transfer_buffer); usb_free_urb(urb); } /* * device (auto-)restart mechanism runs in a timer context => * MUST handle restart with asynchronous usb transfers */ static int peak_usb_restart(struct peak_usb_device *dev) { struct urb *urb; int err; u8 *buf; /* * if device doesn't define any asynchronous restart handler, simply * wake the netdev queue up */ if (!dev->adapter->dev_restart_async) { peak_usb_restart_complete(dev); return 0; } /* first allocate a urb to handle the asynchronous steps */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { netdev_err(dev->netdev, "no memory left for urb\n"); return -ENOMEM; } /* also allocate enough space for the commands to send */ buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC); if (!buf) { netdev_err(dev->netdev, "no memory left for async cmd\n"); usb_free_urb(urb); return -ENOMEM; } /* call the device specific handler for the restart */ err = dev->adapter->dev_restart_async(dev, urb, buf); if (!err) return 0; kfree(buf); usb_free_urb(urb); return err; } /* * candev callback used to change CAN mode. * Warning: this is called from a timer context! 
*/ static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode) { struct peak_usb_device *dev = netdev_priv(netdev); int err = 0; if (!dev->open_time) return -EINVAL; switch (mode) { case CAN_MODE_START: err = peak_usb_restart(dev); if (err) netdev_err(netdev, "couldn't start device (err %d)\n", err); break; default: return -EOPNOTSUPP; } return err; } /* * candev callback used to set device bitrate. */ static int peak_usb_set_bittiming(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); struct can_bittiming *bt = &dev->can.bittiming; if (dev->adapter->dev_set_bittiming) { int err = dev->adapter->dev_set_bittiming(dev, bt); if (err) netdev_info(netdev, "couldn't set bitrate (err %d)\n", err); return err; } return 0; } static const struct net_device_ops peak_usb_netdev_ops = { .ndo_open = peak_usb_ndo_open, .ndo_stop = peak_usb_ndo_stop, .ndo_start_xmit = peak_usb_ndo_start_xmit, }; /* * create one device which is attached to CAN controller #ctrl_idx of the * usb adapter. 
*/ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, struct usb_interface *intf, int ctrl_idx) { struct usb_device *usb_dev = interface_to_usbdev(intf); int sizeof_candev = peak_usb_adapter->sizeof_dev_private; struct peak_usb_device *dev; struct net_device *netdev; int i, err; u16 tmp16; if (sizeof_candev < sizeof(struct peak_usb_device)) sizeof_candev = sizeof(struct peak_usb_device); netdev = alloc_candev(sizeof_candev, PCAN_USB_MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "%s: couldn't alloc candev\n", PCAN_USB_DRIVER_NAME); return -ENOMEM; } dev = netdev_priv(netdev); /* allocate a buffer large enough to send commands */ dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { dev_err(&intf->dev, "%s: couldn't alloc cmd buffer\n", PCAN_USB_DRIVER_NAME); err = -ENOMEM; goto lbl_set_intf_data; } dev->udev = usb_dev; dev->netdev = netdev; dev->adapter = peak_usb_adapter; dev->ctrl_idx = ctrl_idx; dev->state = PCAN_USB_STATE_CONNECTED; dev->ep_msg_in = peak_usb_adapter->ep_msg_in; dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx]; dev->can.clock = peak_usb_adapter->clock; dev->can.bittiming_const = &peak_usb_adapter->bittiming_const; dev->can.do_set_bittiming = peak_usb_set_bittiming; dev->can.do_set_mode = peak_usb_set_mode; dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY; netdev->netdev_ops = &peak_usb_netdev_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ init_usb_anchor(&dev->rx_submitted); init_usb_anchor(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) dev->tx_contexts[i].echo_index = PCAN_USB_MAX_TX_URBS; dev->prev_siblings = usb_get_intfdata(intf); usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); err = register_candev(netdev); if (err) { dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); goto lbl_free_cmd_buf; } if (dev->prev_siblings) (dev->prev_siblings)->next_siblings = dev; 
/* keep hw revision into the netdevice */ tmp16 = le16_to_cpu(usb_dev->descriptor.bcdDevice); dev->device_rev = tmp16 >> 8; if (dev->adapter->dev_init) { err = dev->adapter->dev_init(dev); if (err) goto lbl_free_cmd_buf; } /* set bus off */ if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) goto lbl_free_cmd_buf; } /* get device number early */ if (dev->adapter->dev_get_device_id) dev->adapter->dev_get_device_id(dev, &dev->device_number); netdev_info(netdev, "attached to %s channel %u (device %u)\n", peak_usb_adapter->name, ctrl_idx, dev->device_number); return 0; lbl_free_cmd_buf: kfree(dev->cmd_buf); lbl_set_intf_data: usb_set_intfdata(intf, dev->prev_siblings); free_candev(netdev); return err; } /* * called by the usb core when the device is unplugged from the system */ static void peak_usb_disconnect(struct usb_interface *intf) { struct peak_usb_device *dev; /* unregister as many netdev devices as siblings */ for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) { struct net_device *netdev = dev->netdev; char name[IFNAMSIZ]; dev->state &= ~PCAN_USB_STATE_CONNECTED; strncpy(name, netdev->name, IFNAMSIZ); unregister_netdev(netdev); free_candev(netdev); kfree(dev->cmd_buf); dev->next_siblings = NULL; if (dev->adapter->dev_free) dev->adapter->dev_free(dev); dev_info(&intf->dev, "%s removed\n", name); } usb_set_intfdata(intf, NULL); } /* * probe function for new PEAK-System devices */ static int peak_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); struct peak_usb_adapter *peak_usb_adapter, **pp; int i, err = -ENOMEM; usb_dev = interface_to_usbdev(intf); /* get corresponding PCAN-USB adapter */ for (pp = peak_usb_adapters_list; *pp; pp++) if ((*pp)->device_id == usb_dev->descriptor.idProduct) break; peak_usb_adapter = *pp; if (!peak_usb_adapter) { /* should never come except device_id bad usage in this file */ pr_err("%s: didn't find device id. 
0x%x in devices list\n", PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct); return -ENODEV; } /* got corresponding adapter: check if it handles current interface */ if (peak_usb_adapter->intf_probe) { err = peak_usb_adapter->intf_probe(intf); if (err) return err; } for (i = 0; i < peak_usb_adapter->ctrl_count; i++) { err = peak_usb_create_dev(peak_usb_adapter, intf, i); if (err) { /* deregister already created devices */ peak_usb_disconnect(intf); break; } } return err; } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver peak_usb_driver = { .name = PCAN_USB_DRIVER_NAME, .disconnect = peak_usb_disconnect, .probe = peak_usb_probe, .id_table = peak_usb_table, }; static int __init peak_usb_init(void) { int err; /* register this driver with the USB subsystem */ err = usb_register(&peak_usb_driver); if (err) pr_err("%s: usb_register failed (err %d)\n", PCAN_USB_DRIVER_NAME, err); return err; } static int peak_usb_do_device_exit(struct device *d, void *arg) { struct usb_interface *intf = to_usb_interface(d); struct peak_usb_device *dev; /* stop as many netdev devices as siblings */ for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) { struct net_device *netdev = dev->netdev; if (netif_device_present(netdev)) if (dev->adapter->dev_exit) dev->adapter->dev_exit(dev); } return 0; } static void __exit peak_usb_exit(void) { int err; /* last chance do send any synchronous commands here */ err = driver_for_each_device(&peak_usb_driver.drvwrap.driver, NULL, NULL, peak_usb_do_device_exit); if (err) pr_err("%s: failed to stop all can devices (err %d)\n", PCAN_USB_DRIVER_NAME, err); /* deregister this driver with the USB subsystem */ usb_deregister(&peak_usb_driver); pr_info("%s: PCAN-USB interfaces driver unloaded\n", PCAN_USB_DRIVER_NAME); } module_init(peak_usb_init); module_exit(peak_usb_exit);
gpl-2.0
duynhat1902/lte_kernel_f260s
arch/x86/kernel/apic/bigsmp_32.c
4695
6594
/* * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs. * * Drives the local APIC in "clustered mode". */ #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/dmi.h> #include <linux/smp.h> #include <asm/apicdef.h> #include <asm/fixmap.h> #include <asm/mpspec.h> #include <asm/apic.h> #include <asm/ipi.h> static unsigned bigsmp_get_apic_id(unsigned long x) { return (x >> 24) & 0xFF; } static int bigsmp_apic_id_registered(void) { return 1; } static const struct cpumask *bigsmp_target_cpus(void) { #ifdef CONFIG_SMP return cpu_online_mask; #else return cpumask_of(0); #endif } static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid) { return 0; } static unsigned long bigsmp_check_apicid_present(int bit) { return 1; } static int bigsmp_early_logical_apicid(int cpu) { /* on bigsmp, logical apicid is the same as physical */ return early_per_cpu(x86_cpu_to_apicid, cpu); } static inline unsigned long calculate_ldr(int cpu) { unsigned long val, id; val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; id = per_cpu(x86_bios_cpu_apicid, cpu); val |= SET_APIC_LOGICAL_ID(id); return val; } /* * Set up the logical destination ID. * * Intel recommends to set DFR, LDR and TPR before enabling * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... */ static void bigsmp_init_apic_ldr(void) { unsigned long val; int cpu = smp_processor_id(); apic_write(APIC_DFR, APIC_DFR_FLAT); val = calculate_ldr(cpu); apic_write(APIC_LDR, val); } static void bigsmp_setup_apic_routing(void) { printk(KERN_INFO "Enabling APIC mode: Physflat. 
Using %d I/O APICs\n", nr_ioapics); } static int bigsmp_cpu_present_to_apicid(int mps_cpu) { if (mps_cpu < nr_cpu_ids) return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); return BAD_APICID; } static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) { /* For clustered we don't have a good way to do this yet - hack */ physids_promote(0xFFL, retmap); } static int bigsmp_check_phys_apicid_present(int phys_apicid) { return 1; } /* As we are using single CPU as destination, pick only one CPU here */ static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask) { int cpu = cpumask_first(cpumask); if (cpu < nr_cpu_ids) return cpu_physical_id(cpu); return BAD_APICID; } static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask, const struct cpumask *andmask) { int cpu; /* * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) return cpu_physical_id(cpu); } return BAD_APICID; } static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) { return cpuid_apic >> index_msb; } static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector) { default_send_IPI_mask_sequence_phys(mask, vector); } static void bigsmp_send_IPI_allbutself(int vector) { default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); } static void bigsmp_send_IPI_all(int vector) { bigsmp_send_IPI_mask(cpu_online_mask, vector); } static int dmi_bigsmp; /* can be set by dmi scanners */ static int hp_ht_bigsmp(const struct dmi_system_id *d) { printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident); dmi_bigsmp = 1; return 0; } static const struct dmi_system_id bigsmp_dmi_table[] = { { hp_ht_bigsmp, "HP ProLiant DL760 G2", { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), DMI_MATCH(DMI_BIOS_VERSION, "P44-"), } }, { hp_ht_bigsmp, "HP ProLiant DL740", { DMI_MATCH(DMI_BIOS_VENDOR, "HP"), 
DMI_MATCH(DMI_BIOS_VERSION, "P47-"), } }, { } /* NULL entry stops DMI scanning */ }; static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask) { cpumask_clear(retmask); cpumask_set_cpu(cpu, retmask); } static int probe_bigsmp(void) { if (def_to_bigsmp) dmi_bigsmp = 1; else dmi_check_system(bigsmp_dmi_table); return dmi_bigsmp; } static struct apic apic_bigsmp = { .name = "bigsmp", .probe = probe_bigsmp, .acpi_madt_oem_check = NULL, .apic_id_valid = default_apic_id_valid, .apic_id_registered = bigsmp_apic_id_registered, .irq_delivery_mode = dest_Fixed, /* phys delivery to target CPU: */ .irq_dest_mode = 0, .target_cpus = bigsmp_target_cpus, .disable_esr = 1, .dest_logical = 0, .check_apicid_used = bigsmp_check_apicid_used, .check_apicid_present = bigsmp_check_apicid_present, .vector_allocation_domain = bigsmp_vector_allocation_domain, .init_apic_ldr = bigsmp_init_apic_ldr, .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map, .setup_apic_routing = bigsmp_setup_apic_routing, .multi_timer_check = NULL, .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, .setup_portio_remap = NULL, .check_phys_apicid_present = bigsmp_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = bigsmp_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = bigsmp_get_apic_id, .set_apic_id = NULL, .apic_id_mask = 0xFF << 24, .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and, .send_IPI_mask = bigsmp_send_IPI_mask, .send_IPI_mask_allbutself = NULL, .send_IPI_allbutself = bigsmp_send_IPI_allbutself, .send_IPI_all = bigsmp_send_IPI_all, .send_IPI_self = default_send_IPI_self, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = default_wait_for_init_deassert, .smp_callin_clear_local_apic = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, .write = 
native_apic_mem_write, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, .x86_32_early_logical_apicid = bigsmp_early_logical_apicid, }; void __init generic_bigsmp_probe(void) { unsigned int cpu; if (!probe_bigsmp()) return; apic = &apic_bigsmp; for_each_possible_cpu(cpu) { if (early_per_cpu(x86_cpu_to_logical_apicid, cpu) == BAD_APICID) continue; early_per_cpu(x86_cpu_to_logical_apicid, cpu) = bigsmp_early_logical_apicid(cpu); } pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name); } apic_driver(apic_bigsmp);
gpl-2.0
varund7726/android_kernel_motorola_titan
arch/x86/kernel/apic/apic_flat_64.c
4695
10215
/* * Copyright 2004 James Cleverdon, IBM. * Subject to the GNU Public License, v.2 * * Flat APIC subarch code. * * Hacked for x86-64 by James Cleverdon from i386 architecture code by * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ #include <linux/errno.h> #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/hardirq.h> #include <linux/module.h> #include <asm/smp.h> #include <asm/apic.h> #include <asm/ipi.h> #ifdef CONFIG_ACPI #include <acpi/acpi_bus.h> #endif static struct apic apic_physflat; static struct apic apic_flat; struct apic __read_mostly *apic = &apic_flat; EXPORT_SYMBOL_GPL(apic); static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 1; } static const struct cpumask *flat_target_cpus(void) { return cpu_online_mask; } static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) { /* Careful. Some cpus do not strictly honor the set of cpus * specified in the interrupt destination when using lowest * priority interrupt delivery mode. * * In particular there was a hyperthreading cpu observed to * deliver interrupts to the wrong hyperthread when only one * hyperthread was specified in the interrupt desitination. */ cpumask_clear(retmask); cpumask_bits(retmask)[0] = APIC_ALL_CPUS; } /* * Set up the logical destination ID. * * Intel recommends to set DFR, LDR and TPR before enabling * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * document number 292116). So here it goes... 
*/ void flat_init_apic_ldr(void) { unsigned long val; unsigned long num, id; num = smp_processor_id(); id = 1UL << num; apic_write(APIC_DFR, APIC_DFR_FLAT); val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; val |= SET_APIC_LOGICAL_ID(id); apic_write(APIC_LDR, val); } static inline void _flat_send_IPI_mask(unsigned long mask, int vector) { unsigned long flags; local_irq_save(flags); __default_send_IPI_dest_field(mask, vector, apic->dest_logical); local_irq_restore(flags); } static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) { unsigned long mask = cpumask_bits(cpumask)[0]; _flat_send_IPI_mask(mask, vector); } static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { unsigned long mask = cpumask_bits(cpumask)[0]; int cpu = smp_processor_id(); if (cpu < BITS_PER_LONG) clear_bit(cpu, &mask); _flat_send_IPI_mask(mask, vector); } static void flat_send_IPI_allbutself(int vector) { int cpu = smp_processor_id(); #ifdef CONFIG_HOTPLUG_CPU int hotplug = 1; #else int hotplug = 0; #endif if (hotplug || vector == NMI_VECTOR) { if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { unsigned long mask = cpumask_bits(cpu_online_mask)[0]; if (cpu < BITS_PER_LONG) clear_bit(cpu, &mask); _flat_send_IPI_mask(mask, vector); } } else if (num_online_cpus() > 1) { __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical); } } static void flat_send_IPI_all(int vector) { if (vector == NMI_VECTOR) { flat_send_IPI_mask(cpu_online_mask, vector); } else { __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical); } } static unsigned int flat_get_apic_id(unsigned long x) { unsigned int id; id = (((x)>>24) & 0xFFu); return id; } static unsigned long set_apic_id(unsigned int id) { unsigned long x; x = ((id & 0xFFu)<<24); return x; } static unsigned int read_xapic_id(void) { unsigned int id; id = flat_get_apic_id(apic_read(APIC_ID)); return id; } static int flat_apic_id_registered(void) { return 
physid_isset(read_xapic_id(), phys_cpu_present_map); } static int flat_phys_pkg_id(int initial_apic_id, int index_msb) { return initial_apic_id >> index_msb; } static int flat_probe(void) { return 1; } static struct apic apic_flat = { .name = "flat", .probe = flat_probe, .acpi_madt_oem_check = flat_acpi_madt_oem_check, .apic_id_valid = default_apic_id_valid, .apic_id_registered = flat_apic_id_registered, .irq_delivery_mode = dest_LowestPrio, .irq_dest_mode = 1, /* logical */ .target_cpus = flat_target_cpus, .disable_esr = 0, .dest_logical = APIC_DEST_LOGICAL, .check_apicid_used = NULL, .check_apicid_present = NULL, .vector_allocation_domain = flat_vector_allocation_domain, .init_apic_ldr = flat_init_apic_ldr, .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = flat_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = flat_get_apic_id, .set_apic_id = set_apic_id, .apic_id_mask = 0xFFu << 24, .cpu_mask_to_apicid = default_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and, .send_IPI_mask = flat_send_IPI_mask, .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, .send_IPI_allbutself = flat_send_IPI_allbutself, .send_IPI_all = flat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, .write = native_apic_mem_write, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, .safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; /* * Physflat mode is used when there are more than 8 CPUs on 
a system. * We cannot use logical delivery in this case because the mask * overflows, so use physical mode. */ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { #ifdef CONFIG_ACPI /* * Quirk: some x86_64 machines can only use physical APIC mode * regardless of how many processors are present (x86_64 ES7000 * is an example). */ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID && (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) { printk(KERN_DEBUG "system APIC only can use physical flat"); return 1; } if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) { printk(KERN_DEBUG "IBM Summit detected, will use apic physical"); return 1; } #endif return 0; } static const struct cpumask *physflat_target_cpus(void) { return cpu_online_mask; } static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) { cpumask_clear(retmask); cpumask_set_cpu(cpu, retmask); } static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) { default_send_IPI_mask_sequence_phys(cpumask, vector); } static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { default_send_IPI_mask_allbutself_phys(cpumask, vector); } static void physflat_send_IPI_allbutself(int vector) { default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); } static void physflat_send_IPI_all(int vector) { physflat_send_IPI_mask(cpu_online_mask, vector); } static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) { int cpu; /* * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. */ cpu = cpumask_first(cpumask); if ((unsigned)cpu < nr_cpu_ids) return per_cpu(x86_cpu_to_apicid, cpu); else return BAD_APICID; } static unsigned int physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, const struct cpumask *andmask) { int cpu; /* * We're using fixed IRQ delivery, can only return one phys APIC ID. * May as well be the first. 
*/ for_each_cpu_and(cpu, cpumask, andmask) { if (cpumask_test_cpu(cpu, cpu_online_mask)) break; } return per_cpu(x86_cpu_to_apicid, cpu); } static int physflat_probe(void) { if (apic == &apic_physflat || num_possible_cpus() > 8) return 1; return 0; } static struct apic apic_physflat = { .name = "physical flat", .probe = physflat_probe, .acpi_madt_oem_check = physflat_acpi_madt_oem_check, .apic_id_valid = default_apic_id_valid, .apic_id_registered = flat_apic_id_registered, .irq_delivery_mode = dest_Fixed, .irq_dest_mode = 0, /* physical */ .target_cpus = physflat_target_cpus, .disable_esr = 0, .dest_logical = 0, .check_apicid_used = NULL, .check_apicid_present = NULL, .vector_allocation_domain = physflat_vector_allocation_domain, /* not needed, but shouldn't hurt: */ .init_apic_ldr = flat_init_apic_ldr, .ioapic_phys_id_map = NULL, .setup_apic_routing = NULL, .multi_timer_check = NULL, .cpu_present_to_apicid = default_cpu_present_to_apicid, .apicid_to_cpu_present = NULL, .setup_portio_remap = NULL, .check_phys_apicid_present = default_check_phys_apicid_present, .enable_apic_mode = NULL, .phys_pkg_id = flat_phys_pkg_id, .mps_oem_check = NULL, .get_apic_id = flat_get_apic_id, .set_apic_id = set_apic_id, .apic_id_mask = 0xFFu << 24, .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, .send_IPI_mask = physflat_send_IPI_mask, .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, .send_IPI_allbutself = physflat_send_IPI_allbutself, .send_IPI_all = physflat_send_IPI_all, .send_IPI_self = apic_send_IPI_self, .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, .wait_for_init_deassert = NULL, .smp_callin_clear_local_apic = NULL, .inquire_remote_apic = default_inquire_remote_apic, .read = native_apic_mem_read, .write = native_apic_mem_write, .icr_read = native_apic_icr_read, .icr_write = native_apic_icr_write, .wait_icr_idle = native_apic_wait_icr_idle, 
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle, }; /* * We need to check for physflat first, so this order is important. */ apic_drivers(apic_physflat, apic_flat);
gpl-2.0
DNSS4503/android_kernel_msm
drivers/mfd/wm831x-auxadc.c
5463
6809
/* * wm831x-auxadc.c -- AUXADC for Wolfson WM831x PMICs * * Copyright 2009-2011 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/mfd/core.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/pdata.h> #include <linux/mfd/wm831x/irq.h> #include <linux/mfd/wm831x/auxadc.h> #include <linux/mfd/wm831x/otp.h> #include <linux/mfd/wm831x/regulator.h> struct wm831x_auxadc_req { struct list_head list; enum wm831x_auxadc input; int val; struct completion done; }; static int wm831x_auxadc_read_irq(struct wm831x *wm831x, enum wm831x_auxadc input) { struct wm831x_auxadc_req *req; int ret; bool ena = false; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; init_completion(&req->done); req->input = input; req->val = -ETIMEDOUT; mutex_lock(&wm831x->auxadc_lock); /* Enqueue the request */ list_add(&req->list, &wm831x->auxadc_pending); ena = !wm831x->auxadc_active; if (ena) { ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, WM831X_AUX_ENA); if (ret != 0) { dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret); goto out; } } /* Enable the conversion if not already running */ if (!(wm831x->auxadc_active & (1 << input))) { ret = wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE, 1 << input, 1 << input); if (ret != 0) { dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret); goto out; } wm831x->auxadc_active |= 1 << input; } /* We convert at the fastest rate possible */ if (ena) { ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_CVT_ENA | WM831X_AUX_RATE_MASK, WM831X_AUX_CVT_ENA | 
WM831X_AUX_RATE_MASK); if (ret != 0) { dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret); goto out; } } mutex_unlock(&wm831x->auxadc_lock); /* Wait for an interrupt */ wait_for_completion_timeout(&req->done, msecs_to_jiffies(500)); mutex_lock(&wm831x->auxadc_lock); list_del(&req->list); ret = req->val; out: mutex_unlock(&wm831x->auxadc_lock); kfree(req); return ret; } static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data) { struct wm831x *wm831x = irq_data; struct wm831x_auxadc_req *req; int ret, input, val; ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); if (ret < 0) { dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret); return IRQ_NONE; } input = ((ret & WM831X_AUX_DATA_SRC_MASK) >> WM831X_AUX_DATA_SRC_SHIFT) - 1; if (input == 14) input = WM831X_AUX_CAL; val = ret & WM831X_AUX_DATA_MASK; mutex_lock(&wm831x->auxadc_lock); /* Disable this conversion, we're about to complete all users */ wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE, 1 << input, 0); wm831x->auxadc_active &= ~(1 << input); /* Turn off the entire convertor if idle */ if (!wm831x->auxadc_active) wm831x_reg_write(wm831x, WM831X_AUXADC_CONTROL, 0); /* Wake up any threads waiting for this request */ list_for_each_entry(req, &wm831x->auxadc_pending, list) { if (req->input == input) { req->val = val; complete(&req->done); } } mutex_unlock(&wm831x->auxadc_lock); return IRQ_HANDLED; } static int wm831x_auxadc_read_polled(struct wm831x *wm831x, enum wm831x_auxadc input) { int ret, src, timeout; mutex_lock(&wm831x->auxadc_lock); ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, WM831X_AUX_ENA); if (ret < 0) { dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret); goto out; } /* We force a single source at present */ src = input; ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE, 1 << src); if (ret < 0) { dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret); goto out; } ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_CVT_ENA, 
WM831X_AUX_CVT_ENA); if (ret < 0) { dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret); goto disable; } /* If we're not using interrupts then poll the * interrupt status register */ timeout = 5; while (timeout) { msleep(1); ret = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1); if (ret < 0) { dev_err(wm831x->dev, "ISR 1 read failed: %d\n", ret); goto disable; } /* Did it complete? */ if (ret & WM831X_AUXADC_DATA_EINT) { wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1, WM831X_AUXADC_DATA_EINT); break; } else { dev_err(wm831x->dev, "AUXADC conversion timeout\n"); ret = -EBUSY; goto disable; } } ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); if (ret < 0) { dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret); goto disable; } src = ((ret & WM831X_AUX_DATA_SRC_MASK) >> WM831X_AUX_DATA_SRC_SHIFT) - 1; if (src == 14) src = WM831X_AUX_CAL; if (src != input) { dev_err(wm831x->dev, "Data from source %d not %d\n", src, input); ret = -EINVAL; } else { ret &= WM831X_AUX_DATA_MASK; } disable: wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0); out: mutex_unlock(&wm831x->auxadc_lock); return ret; } /** * wm831x_auxadc_read: Read a value from the WM831x AUXADC * * @wm831x: Device to read from. * @input: AUXADC input to read. */ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) { return wm831x->auxadc_read(wm831x, input); } EXPORT_SYMBOL_GPL(wm831x_auxadc_read); /** * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC * * @wm831x: Device to read from. * @input: AUXADC input to read. 
*/ int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input) { int ret; ret = wm831x_auxadc_read(wm831x, input); if (ret < 0) return ret; ret *= 1465; return ret; } EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv); void wm831x_auxadc_init(struct wm831x *wm831x) { int ret; mutex_init(&wm831x->auxadc_lock); INIT_LIST_HEAD(&wm831x->auxadc_pending); if (wm831x->irq && wm831x->irq_base) { wm831x->auxadc_read = wm831x_auxadc_read_irq; ret = request_threaded_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, NULL, wm831x_auxadc_irq, 0, "auxadc", wm831x); if (ret < 0) { dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n", ret); wm831x->auxadc_read = NULL; } } if (!wm831x->auxadc_read) wm831x->auxadc_read = wm831x_auxadc_read_polled; }
gpl-2.0
ShinySide/SM-T900_Kernel
arch/sh/mm/consistent.c
8791
3716
/* * arch/sh/mm/consistent.c * * Copyright (C) 2004 - 2007 Paul Mundt * * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/mm.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/dma-debug.h> #include <linux/io.h> #include <linux/module.h> #include <linux/gfp.h> #include <asm/cacheflush.h> #include <asm/addrspace.h> #define PREALLOC_DMA_DEBUG_ENTRIES 4096 struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); static int __init dma_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } fs_initcall(dma_init); void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) { void *ret, *ret_nocache; int order = get_order(size); gfp |= __GFP_ZERO; ret = (void *)__get_free_pages(gfp, order); if (!ret) return NULL; /* * Pages from the page allocator may have data present in * cache. So flush the cache before using uncached memory. */ dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL); ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size); if (!ret_nocache) { free_pages((unsigned long)ret, order); return NULL; } split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); *dma_handle = virt_to_phys(ret); return ret_nocache; } void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { int order = get_order(size); unsigned long pfn = dma_handle >> PAGE_SHIFT; int k; for (k = 0; k < (1 << order); k++) __free_pages(pfn_to_page(pfn + k), 0); iounmap(vaddr); } void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { void *addr; addr = __in_29bit_mode() ? 
(void *)CAC_ADDR((unsigned long)vaddr) : vaddr; switch (direction) { case DMA_FROM_DEVICE: /* invalidate only */ __flush_invalidate_region(addr, size); break; case DMA_TO_DEVICE: /* writeback only */ __flush_wback_region(addr, size); break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ __flush_purge_region(addr, size); break; default: BUG(); } } EXPORT_SYMBOL(dma_cache_sync); static int __init memchunk_setup(char *str) { return 1; /* accept anything that begins with "memchunk." */ } __setup("memchunk.", memchunk_setup); static void __init memchunk_cmdline_override(char *name, unsigned long *sizep) { char *p = boot_command_line; int k = strlen(name); while ((p = strstr(p, "memchunk."))) { p += 9; /* strlen("memchunk.") */ if (!strncmp(name, p, k) && p[k] == '=') { p += k + 1; *sizep = memparse(p, NULL); pr_info("%s: forcing memory chunk size to 0x%08lx\n", name, *sizep); break; } } } int __init platform_resource_setup_memory(struct platform_device *pdev, char *name, unsigned long memsize) { struct resource *r; dma_addr_t dma_handle; void *buf; r = pdev->resource + pdev->num_resources - 1; if (r->flags) { pr_warning("%s: unable to find empty space for resource\n", name); return -EINVAL; } memchunk_cmdline_override(name, &memsize); if (!memsize) return 0; buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL); if (!buf) { pr_warning("%s: unable to allocate memory\n", name); return -ENOMEM; } memset(buf, 0, memsize); r->flags = IORESOURCE_MEM; r->start = dma_handle; r->end = r->start + memsize - 1; r->name = name; return 0; }
gpl-2.0
JulianKemmerer/Drexel-CS370
arch/ppc/platforms/prpmc800.c
88
13802
/* * Author: Dale Farnsworth <dale.farnsworth@mvista.com> * * 2001-2004 (c) MontaVista, Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/types.h> #include <linux/major.h> #include <linux/initrd.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/ide.h> #include <linux/root_dev.h> #include <linux/harrier_defs.h> #include <asm/byteorder.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/machdep.h> #include <asm/time.h> #include <asm/pci-bridge.h> #include <asm/open_pic.h> #include <asm/bootinfo.h> #include <asm/harrier.h> #include "prpmc800.h" #define HARRIER_REVI_REG (PRPMC800_HARRIER_XCSR_BASE+HARRIER_REVI_OFF) #define HARRIER_UCTL_REG (PRPMC800_HARRIER_XCSR_BASE+HARRIER_UCTL_OFF) #define HARRIER_MISC_CSR_REG (PRPMC800_HARRIER_XCSR_BASE+HARRIER_MISC_CSR_OFF) #define HARRIER_IFEVP_REG (PRPMC800_HARRIER_MPIC_BASE+HARRIER_MPIC_IFEVP_OFF) #define HARRIER_IFEDE_REG (PRPMC800_HARRIER_MPIC_BASE+HARRIER_MPIC_IFEDE_OFF) #define HARRIER_FEEN_REG (PRPMC800_HARRIER_XCSR_BASE+HARRIER_FEEN_OFF) #define HARRIER_FEMA_REG (PRPMC800_HARRIER_XCSR_BASE+HARRIER_FEMA_OFF) #define HARRIER_VENI_REG (PRPMC800_HARRIER_XCSR_BASE + HARRIER_VENI_OFF) #define HARRIER_MISC_CSR (PRPMC800_HARRIER_XCSR_BASE + \ HARRIER_MISC_CSR_OFF) #define MONARCH (monarch != 0) #define NON_MONARCH (monarch == 0) extern int mpic_init(void); extern unsigned long loops_per_jiffy; extern void gen550_progress(char *, unsigned short); static int monarch = 0; static int found_self = 0; static int self = 0; static u_char 
prpmc800_openpic_initsenses[] __initdata = { (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_HOSTINT0 */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_UNUSED */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_DEBUGINT */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_HARRIER_WDT */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_UNUSED */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_UNUSED */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_HOSTINT1 */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_HOSTINT2 */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_HOSTINT3 */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_PMC_INTA */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_PMC_INTB */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_PMC_INTC */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_PMC_INTD */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_UNUSED */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_UNUSED */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_UNUSED */ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* PRPMC800_INT_HARRIER_INT (UARTS, ABORT, DMA) */ }; /* * Motorola PrPMC750/PrPMC800 in PrPMCBASE or PrPMC-Carrier * Combined irq tables. Only Base has IDSEL 14, only Carrier has 21 and 22. 
*/ static inline int prpmc_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) { static char pci_irq_table[][4] = /* * PCI IDSEL/INTPIN->INTLINE * A B C D */ { {12, 0, 0, 0}, /* IDSEL 14 - Ethernet, base */ {0, 0, 0, 0}, /* IDSEL 15 - unused */ {10, 11, 12, 9}, /* IDSEL 16 - PMC A1, PMC1 */ {10, 11, 12, 9}, /* IDSEL 17 - PrPMC-A-B, PMC2-B */ {11, 12, 9, 10}, /* IDSEL 18 - PMC A1-B, PMC1-B */ {0, 0, 0, 0}, /* IDSEL 19 - unused */ {9, 10, 11, 12}, /* IDSEL 20 - P2P Bridge */ {11, 12, 9, 10}, /* IDSEL 21 - PMC A2, carrier */ {12, 9, 10, 11}, /* IDSEL 22 - PMC A2-B, carrier */ }; const long min_idsel = 14, max_idsel = 22, irqs_per_slot = 4; return PCI_IRQ_TABLE_LOOKUP; }; static int prpmc_read_config_dword(struct pci_controller *hose, u8 bus, u8 devfn, int offset, u32 * val) { /* paranoia */ if ((hose == NULL) || (hose->cfg_addr == NULL) || (hose->cfg_data == NULL)) return PCIBIOS_DEVICE_NOT_FOUND; out_be32(hose->cfg_addr, ((offset & 0xfc) << 24) | (devfn << 16) | ((bus - hose->bus_offset) << 8) | 0x80); *val = in_le32((u32 *) (hose->cfg_data + (offset & 3))); return PCIBIOS_SUCCESSFUL; } #define HARRIER_PCI_VEND_DEV_ID (PCI_VENDOR_ID_MOTOROLA | \ (PCI_DEVICE_ID_MOTOROLA_HARRIER << 16)) static int prpmc_self(u8 bus, u8 devfn) { /* * Harriers always view themselves as being on bus 0. If we're not * looking at bus 0, we're not going to find ourselves. */ if (bus != 0) return PCIBIOS_DEVICE_NOT_FOUND; else { int result; int val; struct pci_controller *hose; hose = pci_bus_to_hose(bus); /* See if target device is a Harrier */ result = prpmc_read_config_dword(hose, bus, devfn, PCI_VENDOR_ID, &val); if ((result != PCIBIOS_SUCCESSFUL) || (val != HARRIER_PCI_VEND_DEV_ID)) return PCIBIOS_DEVICE_NOT_FOUND; /* * LBA bit is set if target Harrier == initiating Harrier * (i.e. if we are reading our own PCI header). 
*/ result = prpmc_read_config_dword(hose, bus, devfn, HARRIER_LBA_OFF, &val); if ((result != PCIBIOS_SUCCESSFUL) || ((val & HARRIER_LBA_MSK) != HARRIER_LBA_MSK)) return PCIBIOS_DEVICE_NOT_FOUND; /* It's us, save our location for later */ self = devfn; found_self = 1; return PCIBIOS_SUCCESSFUL; } } static int prpmc_exclude_device(u8 bus, u8 devfn) { /* * Monarch is allowed to access all PCI devices. Non-monarch is * only allowed to access its own Harrier. */ if (MONARCH) return PCIBIOS_SUCCESSFUL; if (found_self) if ((bus == 0) && (devfn == self)) return PCIBIOS_SUCCESSFUL; else return PCIBIOS_DEVICE_NOT_FOUND; else return prpmc_self(bus, devfn); } void __init prpmc800_find_bridges(void) { struct pci_controller *hose; int host_bridge; hose = pcibios_alloc_controller(); if (!hose) return; hose->first_busno = 0; hose->last_busno = 0xff; ppc_md.pci_exclude_device = prpmc_exclude_device; ppc_md.pcibios_fixup = NULL; ppc_md.pcibios_fixup_bus = NULL; ppc_md.pci_swizzle = common_swizzle; ppc_md.pci_map_irq = prpmc_map_irq; setup_indirect_pci(hose, PRPMC800_PCI_CONFIG_ADDR, PRPMC800_PCI_CONFIG_DATA); /* Get host bridge vendor/dev id */ host_bridge = in_be32((uint *) (HARRIER_VENI_REG)); if (host_bridge != HARRIER_VEND_DEV_ID) { printk(KERN_CRIT "Host bridge 0x%x not supported\n", host_bridge); return; } monarch = in_be32((uint *) HARRIER_MISC_CSR) & HARRIER_SYSCON; printk(KERN_INFO "Running as %s.\n", MONARCH ? 
"Monarch" : "Non-Monarch"); hose->io_space.start = PRPMC800_PCI_IO_START; hose->io_space.end = PRPMC800_PCI_IO_END; hose->io_base_virt = (void *)PRPMC800_ISA_IO_BASE; hose->pci_mem_offset = PRPMC800_PCI_PHY_MEM_OFFSET; pci_init_resource(&hose->io_resource, PRPMC800_PCI_IO_START, PRPMC800_PCI_IO_END, IORESOURCE_IO, "PCI host bridge"); if (MONARCH) { hose->mem_space.start = PRPMC800_PCI_MEM_START; hose->mem_space.end = PRPMC800_PCI_MEM_END; pci_init_resource(&hose->mem_resources[0], PRPMC800_PCI_MEM_START, PRPMC800_PCI_MEM_END, IORESOURCE_MEM, "PCI host bridge"); if (harrier_init(hose, PRPMC800_HARRIER_XCSR_BASE, PRPMC800_PROC_PCI_MEM_START, PRPMC800_PROC_PCI_MEM_END, PRPMC800_PROC_PCI_IO_START, PRPMC800_PROC_PCI_IO_END, PRPMC800_HARRIER_MPIC_BASE) != 0) printk(KERN_CRIT "Could not initialize HARRIER " "bridge\n"); harrier_release_eready(PRPMC800_HARRIER_XCSR_BASE); harrier_wait_eready(PRPMC800_HARRIER_XCSR_BASE); hose->last_busno = pciauto_bus_scan(hose, hose->first_busno); } else { pci_init_resource(&hose->mem_resources[0], PRPMC800_NM_PCI_MEM_START, PRPMC800_NM_PCI_MEM_END, IORESOURCE_MEM, "PCI host bridge"); hose->mem_space.start = PRPMC800_NM_PCI_MEM_START; hose->mem_space.end = PRPMC800_NM_PCI_MEM_END; if (harrier_init(hose, PRPMC800_HARRIER_XCSR_BASE, PRPMC800_NM_PROC_PCI_MEM_START, PRPMC800_NM_PROC_PCI_MEM_END, PRPMC800_PROC_PCI_IO_START, PRPMC800_PROC_PCI_IO_END, PRPMC800_HARRIER_MPIC_BASE) != 0) printk(KERN_CRIT "Could not initialize HARRIER " "bridge\n"); harrier_setup_nonmonarch(PRPMC800_HARRIER_XCSR_BASE, HARRIER_ITSZ_1MB); harrier_release_eready(PRPMC800_HARRIER_XCSR_BASE); } } static int prpmc800_show_cpuinfo(struct seq_file *m) { seq_printf(m, "machine\t\t: PrPMC800\n"); return 0; } static void __init prpmc800_setup_arch(void) { /* init to some ~sane value until calibrate_delay() runs */ loops_per_jiffy = 50000000 / HZ; /* Lookup PCI host bridges */ prpmc800_find_bridges(); #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) ROOT_DEV = Root_RAM0; else 
#endif #ifdef CONFIG_ROOT_NFS ROOT_DEV = Root_NFS; #else ROOT_DEV = Root_SDA2; #endif printk(KERN_INFO "Port by MontaVista Software, Inc. " "(source@mvista.com)\n"); } /* * Compute the PrPMC800's tbl frequency using the baud clock as a reference. */ static void __init prpmc800_calibrate_decr(void) { unsigned long tbl_start, tbl_end; unsigned long current_state, old_state, tb_ticks_per_second; unsigned int count; unsigned int harrier_revision; harrier_revision = readb(HARRIER_REVI_REG); if (harrier_revision < 2) { /* XTAL64 was broken in harrier revision 1 */ printk(KERN_INFO "time_init: Harrier revision %d, assuming " "100 Mhz bus\n", harrier_revision); tb_ticks_per_second = 100000000 / 4; tb_ticks_per_jiffy = tb_ticks_per_second / HZ; tb_to_us = mulhwu_scale_factor(tb_ticks_per_second, 1000000); return; } /* * The XTAL64 bit oscillates at the 1/64 the base baud clock * Set count to XTAL64 cycles per second. Since we'll count * half-cycles, we'll reach the count in half a second. */ count = PRPMC800_BASE_BAUD / 64; /* Find the first edge of the baud clock */ old_state = readb(HARRIER_UCTL_REG) & HARRIER_XTAL64_MASK; do { current_state = readb(HARRIER_UCTL_REG) & HARRIER_XTAL64_MASK; } while (old_state == current_state); old_state = current_state; /* Get the starting time base value */ tbl_start = get_tbl(); /* * Loop until we have found a number of edges (half-cycles) * equal to the count (half a second) */ do { do { current_state = readb(HARRIER_UCTL_REG) & HARRIER_XTAL64_MASK; } while (old_state == current_state); old_state = current_state; } while (--count); /* Get the ending time base value */ tbl_end = get_tbl(); /* We only counted for half a second, so double to get ticks/second */ tb_ticks_per_second = (tbl_end - tbl_start) * 2; tb_ticks_per_jiffy = tb_ticks_per_second / HZ; tb_to_us = mulhwu_scale_factor(tb_ticks_per_second, 1000000); } static void prpmc800_restart(char *cmd) { ulong temp; local_irq_disable(); temp = in_be32((uint *) HARRIER_MISC_CSR_REG); 
temp |= HARRIER_RSTOUT; out_be32((uint *) HARRIER_MISC_CSR_REG, temp); while (1) ; } static void prpmc800_halt(void) { local_irq_disable(); while (1) ; } static void prpmc800_power_off(void) { prpmc800_halt(); } static void __init prpmc800_init_IRQ(void) { OpenPIC_InitSenses = prpmc800_openpic_initsenses; OpenPIC_NumInitSenses = sizeof(prpmc800_openpic_initsenses); /* Setup external interrupt sources. */ openpic_set_sources(0, 16, OpenPIC_Addr + 0x10000); /* Setup internal UART interrupt source. */ openpic_set_sources(16, 1, OpenPIC_Addr + 0x10200); /* Do the MPIC initialization based on the above settings. */ openpic_init(0); /* enable functional exceptions for uarts and abort */ out_8((u8 *) HARRIER_FEEN_REG, (HARRIER_FE_UA0 | HARRIER_FE_UA1)); out_8((u8 *) HARRIER_FEMA_REG, ~(HARRIER_FE_UA0 | HARRIER_FE_UA1)); } /* * Set BAT 3 to map 0xf0000000 to end of physical memory space. */ static __inline__ void prpmc800_set_bat(void) { mb(); mtspr(SPRN_DBAT1U, 0xf0001ffe); mtspr(SPRN_DBAT1L, 0xf000002a); mb(); } /* * We need to read the Harrier memory controller * to properly determine this value */ static unsigned long __init prpmc800_find_end_of_memory(void) { /* Read the memory size from the Harrier XCSR */ return harrier_get_mem_size(PRPMC800_HARRIER_XCSR_BASE); } static void __init prpmc800_map_io(void) { io_block_mapping(0x80000000, 0x80000000, 0x10000000, _PAGE_IO); io_block_mapping(0xf0000000, 0xf0000000, 0x10000000, _PAGE_IO); } void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { parse_bootinfo(find_bootinfo()); prpmc800_set_bat(); isa_io_base = PRPMC800_ISA_IO_BASE; isa_mem_base = PRPMC800_ISA_MEM_BASE; pci_dram_offset = PRPMC800_PCI_DRAM_OFFSET; ppc_md.setup_arch = prpmc800_setup_arch; ppc_md.show_cpuinfo = prpmc800_show_cpuinfo; ppc_md.init_IRQ = prpmc800_init_IRQ; ppc_md.get_irq = openpic_get_irq; ppc_md.find_end_of_memory = prpmc800_find_end_of_memory; ppc_md.setup_io_mappings = 
prpmc800_map_io; ppc_md.restart = prpmc800_restart; ppc_md.power_off = prpmc800_power_off; ppc_md.halt = prpmc800_halt; /* PrPMC800 has no timekeeper part */ ppc_md.time_init = NULL; ppc_md.get_rtc_time = NULL; ppc_md.set_rtc_time = NULL; ppc_md.calibrate_decr = prpmc800_calibrate_decr; #ifdef CONFIG_SERIAL_TEXT_DEBUG ppc_md.progress = gen550_progress; #else /* !CONFIG_SERIAL_TEXT_DEBUG */ ppc_md.progress = NULL; #endif /* CONFIG_SERIAL_TEXT_DEBUG */ }
gpl-2.0
u-blox/linux
drivers/dma/fsldma.c
88
39211
/* * Freescale MPC85xx, MPC83xx DMA Engine support * * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. * * Author: * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 * * Description: * DMA engine driver for Freescale MPC8540 DMA controller, which is * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. * The support for MPC8349 DMA controller is also added. * * This driver instructs the DMA controller to issue the PCI Read Multiple * command for PCI read operations, instead of using the default PCI Read Line * command. Please be aware that this setting may result in read pre-fetching * on some platforms. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dmaengine.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include "dmaengine.h" #include "fsldma.h" #define chan_dbg(chan, fmt, arg...) \ dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) #define chan_err(chan, fmt, arg...) 
\ dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) static const char msg_ld_oom[] = "No free memory for link descriptor"; /* * Register Helpers */ static void set_sr(struct fsldma_chan *chan, u32 val) { DMA_OUT(chan, &chan->regs->sr, val, 32); } static u32 get_sr(struct fsldma_chan *chan) { return DMA_IN(chan, &chan->regs->sr, 32); } static void set_mr(struct fsldma_chan *chan, u32 val) { DMA_OUT(chan, &chan->regs->mr, val, 32); } static u32 get_mr(struct fsldma_chan *chan) { return DMA_IN(chan, &chan->regs->mr, 32); } static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) { DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); } static dma_addr_t get_cdar(struct fsldma_chan *chan) { return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; } static void set_bcr(struct fsldma_chan *chan, u32 val) { DMA_OUT(chan, &chan->regs->bcr, val, 32); } static u32 get_bcr(struct fsldma_chan *chan) { return DMA_IN(chan, &chan->regs->bcr, 32); } /* * Descriptor Helpers */ static void set_desc_cnt(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, u32 count) { hw->count = CPU_TO_DMA(chan, count, 32); } static void set_desc_src(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t src) { u64 snoop_bits; snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); } static void set_desc_dst(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t dst) { u64 snoop_bits; snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); } static void set_desc_next(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t next) { u64 snoop_bits; snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) ? 
FSL_DMA_SNEN : 0; hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); } static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) { u64 snoop_bits; snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0; desc->hw.next_ln_addr = CPU_TO_DMA(chan, DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | snoop_bits, 64); } /* * DMA Engine Hardware Control Helpers */ static void dma_init(struct fsldma_chan *chan) { /* Reset the channel */ set_mr(chan, 0); switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: /* Set the channel to below modes: * EIE - Error interrupt enable * EOLNIE - End of links interrupt enable * BWC - Bandwidth sharing among channels */ set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE); break; case FSL_DMA_IP_83XX: /* Set the channel to below modes: * EOTIE - End-of-transfer interrupt enable * PRC_RM - PCI read multiple */ set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM); break; } } static int dma_is_idle(struct fsldma_chan *chan) { u32 sr = get_sr(chan); return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); } /* * Start the DMA controller * * Preconditions: * - the CDAR register must point to the start descriptor * - the MRn[CS] bit must be cleared */ static void dma_start(struct fsldma_chan *chan) { u32 mode; mode = get_mr(chan); if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { set_bcr(chan, 0); mode |= FSL_DMA_MR_EMP_EN; } else { mode &= ~FSL_DMA_MR_EMP_EN; } if (chan->feature & FSL_DMA_CHAN_START_EXT) { mode |= FSL_DMA_MR_EMS_EN; } else { mode &= ~FSL_DMA_MR_EMS_EN; mode |= FSL_DMA_MR_CS; } set_mr(chan, mode); } static void dma_halt(struct fsldma_chan *chan) { u32 mode; int i; /* read the mode register */ mode = get_mr(chan); /* * The 85xx controller supports channel abort, which will stop * the current transfer. On 83xx, this bit is the transfer error * mask bit, which should not be changed. 
*/ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { mode |= FSL_DMA_MR_CA; set_mr(chan, mode); mode &= ~FSL_DMA_MR_CA; } /* stop the DMA controller */ mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); set_mr(chan, mode); /* wait for the DMA controller to become idle */ for (i = 0; i < 100; i++) { if (dma_is_idle(chan)) return; udelay(10); } if (!dma_is_idle(chan)) chan_err(chan, "DMA halt timeout!\n"); } /** * fsl_chan_set_src_loop_size - Set source address hold transfer size * @chan : Freescale DMA channel * @size : Address loop size, 0 for disable loop * * The set source address hold transfer size. The source * address hold or loop transfer size is when the DMA transfer * data from source address (SA), if the loop size is 4, the DMA will * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, * SA + 1 ... and so on. */ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) { u32 mode; mode = get_mr(chan); switch (size) { case 0: mode &= ~FSL_DMA_MR_SAHE; break; case 1: case 2: case 4: case 8: mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14); break; } set_mr(chan, mode); } /** * fsl_chan_set_dst_loop_size - Set destination address hold transfer size * @chan : Freescale DMA channel * @size : Address loop size, 0 for disable loop * * The set destination address hold transfer size. The destination * address hold or loop transfer size is when the DMA transfer * data to destination address (TA), if the loop size is 4, the DMA will * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, * TA + 1 ... and so on. 
*/ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) { u32 mode; mode = get_mr(chan); switch (size) { case 0: mode &= ~FSL_DMA_MR_DAHE; break; case 1: case 2: case 4: case 8: mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16); break; } set_mr(chan, mode); } /** * fsl_chan_set_request_count - Set DMA Request Count for external control * @chan : Freescale DMA channel * @size : Number of bytes to transfer in a single request * * The Freescale DMA channel can be controlled by the external signal DREQ#. * The DMA request count is how many bytes are allowed to transfer before * pausing the channel, after which a new assertion of DREQ# resumes channel * operation. * * A size of 0 disables external pause control. The maximum size is 1024. */ static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) { u32 mode; BUG_ON(size > 1024); mode = get_mr(chan); mode |= (__ilog2(size) << 24) & 0x0f000000; set_mr(chan, mode); } /** * fsl_chan_toggle_ext_pause - Toggle channel external pause status * @chan : Freescale DMA channel * @enable : 0 is disabled, 1 is enabled. * * The Freescale DMA channel can be controlled by the external signal DREQ#. * The DMA Request Count feature should be used in addition to this feature * to set the number of bytes to transfer before pausing the channel. */ static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) { if (enable) chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; else chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; } /** * fsl_chan_toggle_ext_start - Toggle channel external start status * @chan : Freescale DMA channel * @enable : 0 is disabled, 1 is enabled. * * If enable the external start, the channel can be started by an * external DMA start pin. So the dma_start() does not start the * transfer immediately. The DMA channel will wait for the * control pin asserted. 
*/ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) { if (enable) chan->feature |= FSL_DMA_CHAN_START_EXT; else chan->feature &= ~FSL_DMA_CHAN_START_EXT; } static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) { struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); if (list_empty(&chan->ld_pending)) goto out_splice; /* * Add the hardware descriptor to the chain of hardware descriptors * that already exists in memory. * * This will un-set the EOL bit of the existing transaction, and the * last link in this transaction will become the EOL descriptor. */ set_desc_next(chan, &tail->hw, desc->async_tx.phys); /* * Add the software descriptor and all children to the list * of pending transactions */ out_splice: list_splice_tail_init(&desc->tx_list, &chan->ld_pending); } static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) { struct fsldma_chan *chan = to_fsl_chan(tx->chan); struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; dma_cookie_t cookie = -EINVAL; spin_lock_bh(&chan->desc_lock); #ifdef CONFIG_PM if (unlikely(chan->pm_state != RUNNING)) { chan_dbg(chan, "cannot submit due to suspend\n"); spin_unlock_bh(&chan->desc_lock); return -1; } #endif /* * assign cookies to all of the software descriptors * that make up this transaction */ list_for_each_entry(child, &desc->tx_list, node) { cookie = dma_cookie_assign(&child->async_tx); } /* put this transaction onto the tail of the pending queue */ append_ld_queue(chan, desc); spin_unlock_bh(&chan->desc_lock); return cookie; } /** * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool. 
 * @chan : Freescale DMA channel
 * @desc: descriptor to be freed
 *
 * Unlinks @desc from whatever list it is on and returns its memory to the
 * channel's DMA pool.  Callers hold chan->desc_lock (see the LOCKING notes
 * on the list-freeing helpers below).
 */
static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	/* GFP_ATOMIC — presumably callers may hold chan->desc_lock; confirm */
	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	/* keep the bus address so dma_pool_free() can be called later */
	desc->async_tx.phys = pdesc;

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

/**
 * fsldma_clean_completed_descriptor - free all descriptors which
 * has been completed and acked
 * @chan: Freescale DMA channel
 *
 * This function is used on all completed and acked descriptors.
 * All descriptors should only be freed in this function.
 */
static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
		if (async_tx_test_ack(&desc->async_tx))
			fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsldma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 * @cookie: Freescale DMA transaction identifier
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies.
 *
 * Returns the descriptor's own cookie when it carries one (> 0), otherwise
 * passes @cookie through unchanged, so the caller can track the highest
 * completed cookie across a list walk.
 */
static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	dma_cookie_t ret = cookie;

	/* a negative cookie here would mean the descriptor was never submitted */
	BUG_ON(txd->cookie < 0);

	if (txd->cookie > 0) {
		ret = txd->cookie;

		/* Run the link descriptor callback function */
		if (txd->callback) {
			chan_dbg(chan, "LD %p callback\n", desc);
			txd->callback(txd->callback_param);
		}
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	return ret;
}

/**
 * fsldma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: Freescale DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if acked by async_tx api, or move it to
 * queue ld_completed.
 */
static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	/* Remove from the list of transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/*
		 * Move this descriptor to the list of descriptors which is
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	/* read-back — presumably flushes the posted CDAR write; confirm */
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed to free until flag 'ack' is set
 * @chan: Freescale DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller.
 It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	/* snapshot of the hardware's current descriptor address */
	dma_addr_t curr_phys = get_cdar(chan);
	int seen_current = 0;

	fsldma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		/*
		 * do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (desc->async_tx.phys == curr_phys) {
			seen_current = 1;
			if (!dma_is_idle(chan))
				break;
		}

		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

		fsldma_clean_running_descriptor(chan, desc);
	}

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);

	if (cookie > 0)
		chan->common.completed_cookie = cookie;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32bytes
	 * for meeting FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescae DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

/* Same as fsldma_free_desc_list(), but walks the list back-to-front. */
static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_bh(&chan->desc_lock);
	/* complete any outstanding work first, then drop every queue */
	fsldma_cleanup_descriptors(chan);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	spin_unlock_bh(&chan->desc_lock);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

/*
 * Build a chain of link descriptors for a memcpy of @len bytes, splitting
 * the copy into FSL_DMA_BCR_MAX_CNT-sized pieces.  Returns the async_tx
 * descriptor of the first link, or NULL on bad arguments / allocation
 * failure (partially built chains are freed).
 */
static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		/* each link moves at most the controller's byte-count maximum */
		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

/*
 * Build a descriptor chain that copies between two scatterlists, emitting
 * one link per contiguous overlap of a src and dst entry (capped at
 * FSL_DMA_BCR_MAX_CNT bytes).
 *
 * NOTE(review): if every scatterlist entry has zero dma length, the loop
 * exits with 'new' still NULL and the dereference below would oops —
 * confirm callers never pass all-zero-length lists.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		/* offsets into the current entries = consumed-so-far */
		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist
to transfer to/from * @sg_len: number of entries in @scatterlist * @direction: DMA direction * @flags: DMAEngine flags * @context: transaction context (ignored) * * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the * DMA_SLAVE API, this gets the device-specific information from the * chan->private variable. */ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { /* * This operation is not supported on the Freescale DMA controller * * However, we need to provide the function pointer to allow the * device_control() method to work. */ return NULL; } static int fsl_dma_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct dma_slave_config *config; struct fsldma_chan *chan; int size; if (!dchan) return -EINVAL; chan = to_fsl_chan(dchan); switch (cmd) { case DMA_TERMINATE_ALL: spin_lock_bh(&chan->desc_lock); /* Halt the DMA engine */ dma_halt(chan); /* Remove and free all of the descriptors in the LD queue */ fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); fsldma_free_desc_list(chan, &chan->ld_completed); chan->idle = true; spin_unlock_bh(&chan->desc_lock); return 0; case DMA_SLAVE_CONFIG: config = (struct dma_slave_config *)arg; /* make sure the channel supports setting burst size */ if (!chan->set_request_count) return -ENXIO; /* we set the controller burst size depending on direction */ if (config->direction == DMA_MEM_TO_DEV) size = config->dst_addr_width * config->dst_maxburst; else size = config->src_addr_width * config->src_maxburst; chan->set_request_count(chan, size); return 0; case FSLDMA_EXTERNAL_START: /* make sure the channel supports external start */ if (!chan->toggle_ext_start) return -ENXIO; chan->toggle_ext_start(chan, arg); return 0; default: return -ENXIO; } return 0; } /** * 
fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_bh(&chan->desc_lock);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/* not complete yet: reap finished descriptors, then re-check */
	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	spin_unlock_bh(&chan->desc_lock);

	return dma_cookie_status(dchan, cookie, txstate);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		/* a non-zero byte count means it was a genuine error */
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, EOCDI event need to update cookie
	 * and start the next transfer if it exist.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If it current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit for
	 * prepare next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

/* Deferred half of fsldma_chan_irq(): reap descriptors, restart the queue. */
static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_bh(&chan->desc_lock);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Run all cleanup for descriptors which have been completed */
	fsldma_cleanup_descriptors(chan);

	spin_unlock_bh(&chan->desc_lock);

	chan_dbg(chan, "tasklet exit\n");
}

/*
 * Shared per-controller interrupt: demultiplex the general status register
 * (one byte per channel, highest byte = channel 0) onto fsldma_chan_irq().
 */
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

/* Release whichever IRQ scheme fsldma_request_irqs() set up. */
static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	/*
	 * NOTE(review): this unwind starts at the failing index i, so on the
	 * request_irq() failure path it calls free_irq() on an IRQ that was
	 * never successfully requested — looks like it should start at i-1;
	 * confirm before changing.
	 */
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	/* derive the channel index from the register block offset */
	chan->id = (res.start & 0xfff) < 0x300 ?
		   ((res.start - 0x100) & 0xfff) >> 7 :
		   ((res.start - 0x200) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through — 85xx supports everything 83xx does */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	chan->idle = true;
#ifdef CONFIG_PM
	chan->pm_state = RUNNING;
#endif

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "No enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	/* the Elo DMA engine can address 36 bits of physical memory */
	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		/*
		 * NOTE(review): channels probed above are not removed on this
		 * path, so their iomaps/allocations appear to leak — confirm.
		 */
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

#ifdef CONFIG_PM
/* Refuse suspend while any channel is busy; save each channel's mode reg. */
static int fsldma_suspend_late(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fsldma_device *fdev = platform_get_drvdata(pdev);
	struct fsldma_chan *chan;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		if (unlikely(!chan->idle))
			goto out;
		chan->regs_save.mr = get_mr(chan);
		chan->pm_state = SUSPENDED;
		spin_unlock_bh(&chan->desc_lock);
	}
	return 0;

out:
	/*
	 * NOTE(review): only the channel at the failing index is still
	 * locked here; the unlock in this loop for earlier indices pairs
	 * with locks already released above — looks like a mismatched
	 * unlock; confirm against upstream.
	 */
	for (; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}
	return -EBUSY;
}

/* Restore each channel's mode register with the start/continue bits clear. */
static int fsldma_resume_early(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fsldma_device *fdev = platform_get_drvdata(pdev);
	struct fsldma_chan *chan;
	u32 mode;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		mode = chan->regs_save.mr
			& ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
		set_mr(chan, mode);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}

	return 0;
}

static const struct dev_pm_ops fsldma_pm_ops = {
	.suspend_late = fsldma_suspend_late,
	.resume_early = fsldma_resume_early,
};
#endif

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
		.pm = &fsldma_pm_ops,
#endif
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");
gpl-2.0
EnJens/android-tegra-2.6.36-adam
drivers/gpu/drm/nouveau/nv10_graph.c
88
29224
/* * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr> * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "drmP.h" #include "drm.h" #include "nouveau_drm.h" #include "nouveau_drv.h" #define NV10_FIFO_NUMBER 32 struct pipe_state { uint32_t pipe_0x0000[0x040/4]; uint32_t pipe_0x0040[0x010/4]; uint32_t pipe_0x0200[0x0c0/4]; uint32_t pipe_0x4400[0x080/4]; uint32_t pipe_0x6400[0x3b0/4]; uint32_t pipe_0x6800[0x2f0/4]; uint32_t pipe_0x6c00[0x030/4]; uint32_t pipe_0x7000[0x130/4]; uint32_t pipe_0x7400[0x0c0/4]; uint32_t pipe_0x7800[0x0c0/4]; }; static int nv10_graph_ctx_regs[] = { NV10_PGRAPH_CTX_SWITCH(0), NV10_PGRAPH_CTX_SWITCH(1), NV10_PGRAPH_CTX_SWITCH(2), NV10_PGRAPH_CTX_SWITCH(3), NV10_PGRAPH_CTX_SWITCH(4), NV10_PGRAPH_CTX_CACHE(0, 0), NV10_PGRAPH_CTX_CACHE(0, 1), NV10_PGRAPH_CTX_CACHE(0, 2), NV10_PGRAPH_CTX_CACHE(0, 3), NV10_PGRAPH_CTX_CACHE(0, 4), NV10_PGRAPH_CTX_CACHE(1, 0), NV10_PGRAPH_CTX_CACHE(1, 1), NV10_PGRAPH_CTX_CACHE(1, 2), NV10_PGRAPH_CTX_CACHE(1, 3), NV10_PGRAPH_CTX_CACHE(1, 4), NV10_PGRAPH_CTX_CACHE(2, 0), NV10_PGRAPH_CTX_CACHE(2, 1), NV10_PGRAPH_CTX_CACHE(2, 2), NV10_PGRAPH_CTX_CACHE(2, 3), NV10_PGRAPH_CTX_CACHE(2, 4), NV10_PGRAPH_CTX_CACHE(3, 0), NV10_PGRAPH_CTX_CACHE(3, 1), NV10_PGRAPH_CTX_CACHE(3, 2), NV10_PGRAPH_CTX_CACHE(3, 3), NV10_PGRAPH_CTX_CACHE(3, 4), NV10_PGRAPH_CTX_CACHE(4, 0), NV10_PGRAPH_CTX_CACHE(4, 1), NV10_PGRAPH_CTX_CACHE(4, 2), NV10_PGRAPH_CTX_CACHE(4, 3), NV10_PGRAPH_CTX_CACHE(4, 4), NV10_PGRAPH_CTX_CACHE(5, 0), NV10_PGRAPH_CTX_CACHE(5, 1), NV10_PGRAPH_CTX_CACHE(5, 2), NV10_PGRAPH_CTX_CACHE(5, 3), NV10_PGRAPH_CTX_CACHE(5, 4), NV10_PGRAPH_CTX_CACHE(6, 0), NV10_PGRAPH_CTX_CACHE(6, 1), NV10_PGRAPH_CTX_CACHE(6, 2), NV10_PGRAPH_CTX_CACHE(6, 3), NV10_PGRAPH_CTX_CACHE(6, 4), NV10_PGRAPH_CTX_CACHE(7, 0), NV10_PGRAPH_CTX_CACHE(7, 1), NV10_PGRAPH_CTX_CACHE(7, 2), NV10_PGRAPH_CTX_CACHE(7, 3), NV10_PGRAPH_CTX_CACHE(7, 4), NV10_PGRAPH_CTX_USER, NV04_PGRAPH_DMA_START_0, NV04_PGRAPH_DMA_START_1, NV04_PGRAPH_DMA_LENGTH, NV04_PGRAPH_DMA_MISC, NV10_PGRAPH_DMA_PITCH, NV04_PGRAPH_BOFFSET0, NV04_PGRAPH_BBASE0, NV04_PGRAPH_BLIMIT0, 
NV04_PGRAPH_BOFFSET1, NV04_PGRAPH_BBASE1, NV04_PGRAPH_BLIMIT1, NV04_PGRAPH_BOFFSET2, NV04_PGRAPH_BBASE2, NV04_PGRAPH_BLIMIT2, NV04_PGRAPH_BOFFSET3, NV04_PGRAPH_BBASE3, NV04_PGRAPH_BLIMIT3, NV04_PGRAPH_BOFFSET4, NV04_PGRAPH_BBASE4, NV04_PGRAPH_BLIMIT4, NV04_PGRAPH_BOFFSET5, NV04_PGRAPH_BBASE5, NV04_PGRAPH_BLIMIT5, NV04_PGRAPH_BPITCH0, NV04_PGRAPH_BPITCH1, NV04_PGRAPH_BPITCH2, NV04_PGRAPH_BPITCH3, NV04_PGRAPH_BPITCH4, NV10_PGRAPH_SURFACE, NV10_PGRAPH_STATE, NV04_PGRAPH_BSWIZZLE2, NV04_PGRAPH_BSWIZZLE5, NV04_PGRAPH_BPIXEL, NV10_PGRAPH_NOTIFY, NV04_PGRAPH_PATT_COLOR0, NV04_PGRAPH_PATT_COLOR1, NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */ 0x00400904, 0x00400908, 0x0040090c, 0x00400910, 0x00400914, 0x00400918, 0x0040091c, 0x00400920, 0x00400924, 0x00400928, 0x0040092c, 0x00400930, 0x00400934, 0x00400938, 0x0040093c, 0x00400940, 0x00400944, 0x00400948, 0x0040094c, 0x00400950, 0x00400954, 0x00400958, 0x0040095c, 0x00400960, 0x00400964, 0x00400968, 0x0040096c, 0x00400970, 0x00400974, 0x00400978, 0x0040097c, 0x00400980, 0x00400984, 0x00400988, 0x0040098c, 0x00400990, 0x00400994, 0x00400998, 0x0040099c, 0x004009a0, 0x004009a4, 0x004009a8, 0x004009ac, 0x004009b0, 0x004009b4, 0x004009b8, 0x004009bc, 0x004009c0, 0x004009c4, 0x004009c8, 0x004009cc, 0x004009d0, 0x004009d4, 0x004009d8, 0x004009dc, 0x004009e0, 0x004009e4, 0x004009e8, 0x004009ec, 0x004009f0, 0x004009f4, 0x004009f8, 0x004009fc, NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */ 0x0040080c, NV04_PGRAPH_PATTERN_SHAPE, NV03_PGRAPH_MONO_COLOR0, NV04_PGRAPH_ROP3, NV04_PGRAPH_CHROMA, NV04_PGRAPH_BETA_AND, NV04_PGRAPH_BETA_PREMULT, 0x00400e70, 0x00400e74, 0x00400e78, 0x00400e7c, 0x00400e80, 0x00400e84, 0x00400e88, 0x00400e8c, 0x00400ea0, 0x00400ea4, 0x00400ea8, 0x00400e90, 0x00400e94, 0x00400e98, 0x00400e9c, NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */ NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */ 0x00400f04, 0x00400f24, 0x00400f08, 
0x00400f28, 0x00400f0c, 0x00400f2c, 0x00400f10, 0x00400f30, 0x00400f14, 0x00400f34, 0x00400f18, 0x00400f38, 0x00400f1c, 0x00400f3c, NV10_PGRAPH_XFMODE0, NV10_PGRAPH_XFMODE1, NV10_PGRAPH_GLOBALSTATE0, NV10_PGRAPH_GLOBALSTATE1, NV04_PGRAPH_STORED_FMT, NV04_PGRAPH_SOURCE_COLOR, NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */ NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */ 0x00400404, 0x00400484, 0x00400408, 0x00400488, 0x0040040c, 0x0040048c, 0x00400410, 0x00400490, 0x00400414, 0x00400494, 0x00400418, 0x00400498, 0x0040041c, 0x0040049c, 0x00400420, 0x004004a0, 0x00400424, 0x004004a4, 0x00400428, 0x004004a8, 0x0040042c, 0x004004ac, 0x00400430, 0x004004b0, 0x00400434, 0x004004b4, 0x00400438, 0x004004b8, 0x0040043c, 0x004004bc, 0x00400440, 0x004004c0, 0x00400444, 0x004004c4, 0x00400448, 0x004004c8, 0x0040044c, 0x004004cc, 0x00400450, 0x004004d0, 0x00400454, 0x004004d4, 0x00400458, 0x004004d8, 0x0040045c, 0x004004dc, 0x00400460, 0x004004e0, 0x00400464, 0x004004e4, 0x00400468, 0x004004e8, 0x0040046c, 0x004004ec, 0x00400470, 0x004004f0, 0x00400474, 0x004004f4, 0x00400478, 0x004004f8, 0x0040047c, 0x004004fc, NV03_PGRAPH_ABS_UCLIP_XMIN, NV03_PGRAPH_ABS_UCLIP_XMAX, NV03_PGRAPH_ABS_UCLIP_YMIN, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x00400550, 0x00400558, 0x00400554, 0x0040055c, NV03_PGRAPH_ABS_UCLIPA_XMIN, NV03_PGRAPH_ABS_UCLIPA_XMAX, NV03_PGRAPH_ABS_UCLIPA_YMIN, NV03_PGRAPH_ABS_UCLIPA_YMAX, NV03_PGRAPH_ABS_ICLIP_XMAX, NV03_PGRAPH_ABS_ICLIP_YMAX, NV03_PGRAPH_XY_LOGIC_MISC0, NV03_PGRAPH_XY_LOGIC_MISC1, NV03_PGRAPH_XY_LOGIC_MISC2, NV03_PGRAPH_XY_LOGIC_MISC3, NV03_PGRAPH_CLIPX_0, NV03_PGRAPH_CLIPX_1, NV03_PGRAPH_CLIPY_0, NV03_PGRAPH_CLIPY_1, NV10_PGRAPH_COMBINER0_IN_ALPHA, NV10_PGRAPH_COMBINER1_IN_ALPHA, NV10_PGRAPH_COMBINER0_IN_RGB, NV10_PGRAPH_COMBINER1_IN_RGB, NV10_PGRAPH_COMBINER_COLOR0, NV10_PGRAPH_COMBINER_COLOR1, NV10_PGRAPH_COMBINER0_OUT_ALPHA, NV10_PGRAPH_COMBINER1_OUT_ALPHA, NV10_PGRAPH_COMBINER0_OUT_RGB, NV10_PGRAPH_COMBINER1_OUT_RGB, 
NV10_PGRAPH_COMBINER_FINAL0, NV10_PGRAPH_COMBINER_FINAL1, 0x00400e00, 0x00400e04, 0x00400e08, 0x00400e0c, 0x00400e10, 0x00400e14, 0x00400e18, 0x00400e1c, 0x00400e20, 0x00400e24, 0x00400e28, 0x00400e2c, 0x00400e30, 0x00400e34, 0x00400e38, 0x00400e3c, NV04_PGRAPH_PASSTHRU_0, NV04_PGRAPH_PASSTHRU_1, NV04_PGRAPH_PASSTHRU_2, NV10_PGRAPH_DIMX_TEXTURE, NV10_PGRAPH_WDIMX_TEXTURE, NV10_PGRAPH_DVD_COLORFMT, NV10_PGRAPH_SCALED_FORMAT, NV04_PGRAPH_MISC24_0, NV04_PGRAPH_MISC24_1, NV04_PGRAPH_MISC24_2, NV03_PGRAPH_X_MISC, NV03_PGRAPH_Y_MISC, NV04_PGRAPH_VALID1, NV04_PGRAPH_VALID2, }; static int nv17_graph_ctx_regs[] = { NV10_PGRAPH_DEBUG_4, 0x004006b0, 0x00400eac, 0x00400eb0, 0x00400eb4, 0x00400eb8, 0x00400ebc, 0x00400ec0, 0x00400ec4, 0x00400ec8, 0x00400ecc, 0x00400ed0, 0x00400ed4, 0x00400ed8, 0x00400edc, 0x00400ee0, 0x00400a00, 0x00400a04, }; struct graph_state { int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)]; int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)]; struct pipe_state pipe_state; uint32_t lma_window[4]; }; #define PIPE_SAVE(dev, state, addr) \ do { \ int __i; \ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \ state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \ } while (0) #define PIPE_RESTORE(dev, state, addr) \ do { \ int __i; \ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \ } while (0) static void nv10_graph_save_pipe(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct graph_state *pgraph_ctx = chan->pgraph_ctx; struct pipe_state *pipe = &pgraph_ctx->pipe_state; PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400); PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200); PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400); PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800); PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00); PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000); PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400); PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800); 
	PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
	PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
}

/*
 * Restore a channel's saved 3D pipe state into PGRAPH.
 *
 * XFMODE0/1 are forced to a known value while the 0x0200 range is loaded,
 * then restored before the remaining ranges — the write ordering here is
 * deliberate (hardware requirement, per the XFMODE handling below); do not
 * reorder.
 */
static void nv10_graph_load_pipe(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
	uint32_t xfmode0, xfmode1;
	int i;

	nouveau_wait_for_idle(dev);
	/* XXX check haiku comments */
	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
	/* seed fixed pipe locations with identity-looking values */
	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
	for (i = 0; i < 4; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
	for (i = 0; i < 3; i++)
		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);

	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
	nouveau_wait_for_idle(dev);

	/* restore XFMODE */
	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
	PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
	PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
	PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
	PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
	PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
	PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
	PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
	PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
	nouveau_wait_for_idle(dev);
}

/*
 * Fill a channel's software pipe_state with the hardware's default values
 * (continues below; truncated in this view).
 */
static void nv10_graph_create_pipe(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
	uint32_t *fifo_pipe_state_addr;
	int i;
#define PIPE_INIT(addr) \ do { \ fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \ } while (0) #define PIPE_INIT_END(addr) \ do { \ uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \ ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \ if (fifo_pipe_state_addr != __end_addr) \ NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \ addr, fifo_pipe_state_addr, __end_addr); \ } while (0) #define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value PIPE_INIT(0x0200); for (i = 0; i < 48; i++) NV_WRITE_PIPE_INIT(0x00000000); PIPE_INIT_END(0x0200); PIPE_INIT(0x6400); for (i = 0; i < 211; i++) NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x3f800000); NV_WRITE_PIPE_INIT(0x40000000); NV_WRITE_PIPE_INIT(0x40000000); NV_WRITE_PIPE_INIT(0x40000000); NV_WRITE_PIPE_INIT(0x40000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x3f800000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x3f000000); NV_WRITE_PIPE_INIT(0x3f000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x3f800000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x3f800000); NV_WRITE_PIPE_INIT(0x3f800000); NV_WRITE_PIPE_INIT(0x3f800000); NV_WRITE_PIPE_INIT(0x3f800000); PIPE_INIT_END(0x6400); PIPE_INIT(0x6800); for (i = 0; i < 162; i++) NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x3f800000); for (i = 0; i < 25; i++) NV_WRITE_PIPE_INIT(0x00000000); PIPE_INIT_END(0x6800); PIPE_INIT(0x6c00); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0xbf800000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); 
NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); PIPE_INIT_END(0x6c00); PIPE_INIT(0x7000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x7149f2ca); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x7149f2ca); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x7149f2ca); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x7149f2ca); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x7149f2ca); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x7149f2ca); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x7149f2ca); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x00000000); NV_WRITE_PIPE_INIT(0x7149f2ca); for (i = 0; i < 35; i++) NV_WRITE_PIPE_INIT(0x00000000); PIPE_INIT_END(0x7000); PIPE_INIT(0x7400); for (i = 0; i < 48; i++) NV_WRITE_PIPE_INIT(0x00000000); PIPE_INIT_END(0x7400); PIPE_INIT(0x7800); for (i = 0; i < 48; i++) NV_WRITE_PIPE_INIT(0x00000000); PIPE_INIT_END(0x7800); PIPE_INIT(0x4400); for (i = 0; i < 32; i++) NV_WRITE_PIPE_INIT(0x00000000); PIPE_INIT_END(0x4400); PIPE_INIT(0x0000); for (i = 0; i < 16; i++) NV_WRITE_PIPE_INIT(0x00000000); PIPE_INIT_END(0x0000); PIPE_INIT(0x0040); for (i = 0; i < 4; i++) NV_WRITE_PIPE_INIT(0x00000000); PIPE_INIT_END(0x0040); #undef 
PIPE_INIT #undef PIPE_INIT_END #undef NV_WRITE_PIPE_INIT } static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) { int i; for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) { if (nv10_graph_ctx_regs[i] == reg) return i; } NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg); return -1; } static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg) { int i; for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) { if (nv17_graph_ctx_regs[i] == reg) return i; } NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg); return -1; } static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan, uint32_t inst) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4]; uint32_t ctx_user, ctx_switch[5]; int i, subchan = -1; /* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state * that cannot be restored via MMIO. Do it through the FIFO * instead. 
*/ /* Look for a celsius object */ for (i = 0; i < 8; i++) { int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff; if (class == 0x56 || class == 0x96 || class == 0x99) { subchan = i; break; } } if (subchan < 0 || !inst) return; /* Save the current ctx object */ ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER); for (i = 0; i < 5; i++) ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i)); /* Save the FIFO state */ st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2); st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL); st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH); fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR); for (i = 0; i < ARRAY_SIZE(fifo); i++) fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i); /* Switch to the celsius subchannel */ for (i = 0; i < 5; i++) nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i))); nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13); /* Inject NV10TCL_DMA_VTXBUF */ nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0); nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c); nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst); nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000); pgraph->fifo_access(dev, true); pgraph->fifo_access(dev, false); /* Restore the FIFO state */ for (i = 0; i < ARRAY_SIZE(fifo); i++) nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]); nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr); nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2); nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl); nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh); /* Restore the current ctx object */ for (i = 0; i < 5; i++) nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]); nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user); } int nv10_graph_load_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct graph_state *pgraph_ctx = chan->pgraph_ctx; uint32_t tmp; int i; for (i = 0; i < 
ARRAY_SIZE(nv10_graph_ctx_regs); i++) nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]); if (dev_priv->chipset >= 0x17) { for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) nv_wr32(dev, nv17_graph_ctx_regs[i], pgraph_ctx->nv17[i]); } nv10_graph_load_pipe(chan); nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1) & 0xffff)); nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100); tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER); nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24); tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2); nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff); return 0; } int nv10_graph_unload_context(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_channel *chan; struct graph_state *ctx; uint32_t tmp; int i; chan = pgraph->channel(dev); if (!chan) return 0; ctx = chan->pgraph_ctx; for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]); if (dev_priv->chipset >= 0x17) { for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]); } nv10_graph_save_pipe(chan); nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000); tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; tmp |= (pfifo->channels - 1) << 24; nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); return 0; } void nv10_graph_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_channel *chan = NULL; int chid; pgraph->fifo_access(dev, false); nouveau_wait_for_idle(dev); /* If previous context is valid, we need to save it */ nv10_graph_unload_context(dev); /* Load context for next channel */ chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; chan = dev_priv->fifos[chid]; if 
(chan) nv10_graph_load_context(chan); pgraph->fifo_access(dev, true); } #define NV_WRITE_CTX(reg, val) do { \ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \ if (offset > 0) \ pgraph_ctx->nv10[offset] = val; \ } while (0) #define NV17_WRITE_CTX(reg, val) do { \ int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \ if (offset > 0) \ pgraph_ctx->nv17[offset] = val; \ } while (0) struct nouveau_channel * nv10_graph_channel(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; int chid = dev_priv->engine.fifo.channels; if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000) chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24; if (chid >= dev_priv->engine.fifo.channels) return NULL; return dev_priv->fifos[chid]; } int nv10_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct graph_state *pgraph_ctx; NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id); chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL); if (pgraph_ctx == NULL) return -ENOMEM; NV_WRITE_CTX(0x00400e88, 0x08000000); NV_WRITE_CTX(0x00400e9c, 0x4b7fffff); NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff); NV_WRITE_CTX(0x00400e10, 0x00001000); NV_WRITE_CTX(0x00400e14, 0x00001000); NV_WRITE_CTX(0x00400e30, 0x00080008); NV_WRITE_CTX(0x00400e34, 0x00080008); if (dev_priv->chipset >= 0x17) { /* is it really needed ??? 
*/ NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, nv_rd32(dev, NV10_PGRAPH_DEBUG_4)); NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0)); NV17_WRITE_CTX(0x00400eac, 0x0fff0000); NV17_WRITE_CTX(0x00400eb0, 0x0fff0000); NV17_WRITE_CTX(0x00400ec0, 0x00000080); NV17_WRITE_CTX(0x00400ed0, 0x00000080); } NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24); nv10_graph_create_pipe(chan); return 0; } void nv10_graph_destroy_context(struct nouveau_channel *chan) { struct graph_state *pgraph_ctx = chan->pgraph_ctx; kfree(pgraph_ctx); chan->pgraph_ctx = NULL; } void nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, uint32_t size, uint32_t pitch) { uint32_t limit = max(1u, addr + size) - 1; if (pitch) addr |= 1 << 31; nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit); nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch); nv_wr32(dev, NV10_PGRAPH_TILE(i), addr); } int nv10_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t tmp; int i; nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF); nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000); nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700); /* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */ nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9); nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1<<29) | (1<<31)); if (dev_priv->chipset >= 0x17) { nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000); nv_wr32(dev, 0x400a10, 0x3ff3fb6); nv_wr32(dev, 0x400838, 0x2f8684); nv_wr32(dev, 0x40083c, 0x115f3f); nv_wr32(dev, 0x004006b0, 0x40000020); } else nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); /* Turn all the tiling regions off. 
*/ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) nv10_graph_set_region_tiling(dev, i, 0, 0, 0); nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000); nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000); nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000); nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF); tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff; tmp |= (dev_priv->engine.fifo.channels - 1) << 24; nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp); nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000); return 0; } void nv10_graph_takedown(struct drm_device *dev) { } static int nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, int mthd, uint32_t data) { struct drm_device *dev = chan->dev; struct graph_state *ctx = chan->pgraph_ctx; struct pipe_state *pipe = &ctx->pipe_state; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; uint32_t xfmode0, xfmode1; int i; ctx->lma_window[(mthd - 0x1638) / 4] = data; if (mthd != 0x1644) return 0; nouveau_wait_for_idle(dev); PIPE_SAVE(dev, pipe_0x0040, 0x0040); PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200); PIPE_RESTORE(dev, ctx->lma_window, 0x6790); nouveau_wait_for_idle(dev); xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0); xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1); PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400); PIPE_SAVE(dev, pipe_0x64c0, 0x64c0); PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0); PIPE_SAVE(dev, pipe_0x6a80, 0x6a80); nouveau_wait_for_idle(dev); nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000); nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000); nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); for (i = 0; i < 4; i++) nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000); for (i = 0; i < 4; i++) nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000); 
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); for (i = 0; i < 3; i++) nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000); nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); for (i = 0; i < 3; i++) nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000); nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008); PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200); nouveau_wait_for_idle(dev); PIPE_RESTORE(dev, pipe_0x0040, 0x0040); nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0); nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1); PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0); PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0); PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80); PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400); nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0); nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000); nouveau_wait_for_idle(dev); pgraph->fifo_access(dev, true); return 0; } static int nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, int mthd, uint32_t data) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; nouveau_wait_for_idle(dev); nv_wr32(dev, NV10_PGRAPH_DEBUG_4, nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8); nv_wr32(dev, 0x004006b0, nv_rd32(dev, 0x004006b0) | 0x8 << 24); pgraph->fifo_access(dev, true); return 0; } static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = { { 0x1638, nv17_graph_mthd_lma_window }, { 0x163c, nv17_graph_mthd_lma_window }, { 0x1640, nv17_graph_mthd_lma_window }, { 0x1644, nv17_graph_mthd_lma_window }, { 0x1658, nv17_graph_mthd_lma_enable }, {} }; struct nouveau_pgraph_object_class nv10_graph_grclass[] = { { 0x0030, false, NULL }, /* null */ { 0x0039, false, NULL }, /* m2mf */ { 0x004a, false, NULL }, /* gdirect */ { 0x005f, false, NULL }, /* imageblit */ { 0x009f, false, NULL }, /* imageblit (nv12) */ { 0x008a, false, NULL }, /* ifc */ { 0x0089, false, NULL }, /* sifm */ { 0x0062, 
false, NULL }, /* surf2d */ { 0x0043, false, NULL }, /* rop */ { 0x0012, false, NULL }, /* beta1 */ { 0x0072, false, NULL }, /* beta4 */ { 0x0019, false, NULL }, /* cliprect */ { 0x0044, false, NULL }, /* pattern */ { 0x0052, false, NULL }, /* swzsurf */ { 0x0093, false, NULL }, /* surf3d */ { 0x0094, false, NULL }, /* tex_tri */ { 0x0095, false, NULL }, /* multitex_tri */ { 0x0056, false, NULL }, /* celcius (nv10) */ { 0x0096, false, NULL }, /* celcius (nv11) */ { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */ {} };
gpl-2.0
pro4tlzz/P9000-Kernel
drivers/acpi/acpica/psargs.c
600
22390
/****************************************************************************** * * Module Name: psargs - Parse AML opcode arguments * *****************************************************************************/ /* * Copyright (C) 2000 - 2014, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #include "amlcode.h" #include "acnamesp.h" #include "acdispat.h" #define _COMPONENT ACPI_PARSER ACPI_MODULE_NAME("psargs") /* Local prototypes */ static u32 acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state); static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state *parser_state); /******************************************************************************* * * FUNCTION: acpi_ps_get_next_package_length * * PARAMETERS: parser_state - Current parser state object * * RETURN: Decoded package length. On completion, the AML pointer points * past the length byte or bytes. * * DESCRIPTION: Decode and return a package length field. 
* Note: Largest package length is 28 bits, from ACPI specification * ******************************************************************************/ static u32 acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state) { u8 *aml = parser_state->aml; u32 package_length = 0; u32 byte_count; u8 byte_zero_mask = 0x3F; /* Default [0:5] */ ACPI_FUNCTION_TRACE(ps_get_next_package_length); /* * Byte 0 bits [6:7] contain the number of additional bytes * used to encode the package length, either 0,1,2, or 3 */ byte_count = (aml[0] >> 6); parser_state->aml += ((acpi_size) byte_count + 1); /* Get bytes 3, 2, 1 as needed */ while (byte_count) { /* * Final bit positions for the package length bytes: * Byte3->[20:27] * Byte2->[12:19] * Byte1->[04:11] * Byte0->[00:03] */ package_length |= (aml[byte_count] << ((byte_count << 3) - 4)); byte_zero_mask = 0x0F; /* Use bits [0:3] of byte 0 */ byte_count--; } /* Byte 0 is a special case, either bits [0:3] or [0:5] are used */ package_length |= (aml[0] & byte_zero_mask); return_UINT32(package_length); } /******************************************************************************* * * FUNCTION: acpi_ps_get_next_package_end * * PARAMETERS: parser_state - Current parser state object * * RETURN: Pointer to end-of-package +1 * * DESCRIPTION: Get next package length and return a pointer past the end of * the package. 
Consumes the package length field * ******************************************************************************/ u8 *acpi_ps_get_next_package_end(struct acpi_parse_state *parser_state) { u8 *start = parser_state->aml; u32 package_length; ACPI_FUNCTION_TRACE(ps_get_next_package_end); /* Function below updates parser_state->Aml */ package_length = acpi_ps_get_next_package_length(parser_state); return_PTR(start + package_length); /* end of package */ } /******************************************************************************* * * FUNCTION: acpi_ps_get_next_namestring * * PARAMETERS: parser_state - Current parser state object * * RETURN: Pointer to the start of the name string (pointer points into * the AML. * * DESCRIPTION: Get next raw namestring within the AML stream. Handles all name * prefix characters. Set parser state to point past the string. * (Name is consumed from the AML.) * ******************************************************************************/ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state) { u8 *start = parser_state->aml; u8 *end = parser_state->aml; ACPI_FUNCTION_TRACE(ps_get_next_namestring); /* Point past any namestring prefix characters (backslash or carat) */ while (ACPI_IS_ROOT_PREFIX(*end) || ACPI_IS_PARENT_PREFIX(*end)) { end++; } /* Decode the path prefix character */ switch (*end) { case 0: /* null_name */ if (end == start) { start = NULL; } end++; break; case AML_DUAL_NAME_PREFIX: /* Two name segments */ end += 1 + (2 * ACPI_NAME_SIZE); break; case AML_MULTI_NAME_PREFIX_OP: /* Multiple name segments, 4 chars each, count in next byte */ end += 2 + (*(end + 1) * ACPI_NAME_SIZE); break; default: /* Single name segment */ end += ACPI_NAME_SIZE; break; } parser_state->aml = end; return_PTR((char *)start); } /******************************************************************************* * * FUNCTION: acpi_ps_get_next_namepath * * PARAMETERS: parser_state - Current parser state object * arg - Where the 
namepath will be stored * arg_count - If the namepath points to a control method * the method's argument is returned here. * possible_method_call - Whether the namepath can possibly be the * start of a method call * * RETURN: Status * * DESCRIPTION: Get next name (if method call, return # of required args). * Names are looked up in the internal namespace to determine * if the name represents a control method. If a method * is found, the number of arguments to the method is returned. * This information is critical for parsing to continue correctly. * ******************************************************************************/ acpi_status acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state, struct acpi_parse_state *parser_state, union acpi_parse_object *arg, u8 possible_method_call) { acpi_status status; char *path; union acpi_parse_object *name_op; union acpi_operand_object *method_desc; struct acpi_namespace_node *node; u8 *start = parser_state->aml; ACPI_FUNCTION_TRACE(ps_get_next_namepath); path = acpi_ps_get_next_namestring(parser_state); acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP); /* Null path case is allowed, just exit */ if (!path) { arg->common.value.name = path; return_ACPI_STATUS(AE_OK); } /* * Lookup the name in the internal namespace, starting with the current * scope. We don't want to add anything new to the namespace here, * however, so we use MODE_EXECUTE. 
* Allow searching of the parent tree, but don't open a new scope - * we just want to lookup the object (must be mode EXECUTE to perform * the upsearch) */ status = acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, NULL, &node); /* * If this name is a control method invocation, we must * setup the method call */ if (ACPI_SUCCESS(status) && possible_method_call && (node->type == ACPI_TYPE_METHOD)) { if (walk_state->opcode == AML_UNLOAD_OP) { /* * acpi_ps_get_next_namestring has increased the AML pointer, * so we need to restore the saved AML pointer for method call. */ walk_state->parser_state.aml = start; walk_state->arg_count = 1; acpi_ps_init_op(arg, AML_INT_METHODCALL_OP); return_ACPI_STATUS(AE_OK); } /* This name is actually a control method invocation */ method_desc = acpi_ns_get_attached_object(node); ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Control Method - %p Desc %p Path=%p\n", node, method_desc, path)); name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); if (!name_op) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Change Arg into a METHOD CALL and attach name to it */ acpi_ps_init_op(arg, AML_INT_METHODCALL_OP); name_op->common.value.name = path; /* Point METHODCALL/NAME to the METHOD Node */ name_op->common.node = node; acpi_ps_append_arg(arg, name_op); if (!method_desc) { ACPI_ERROR((AE_INFO, "Control Method %p has no attached object", node)); return_ACPI_STATUS(AE_AML_INTERNAL); } ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Control Method - %p Args %X\n", node, method_desc->method.param_count)); /* Get the number of arguments to expect */ walk_state->arg_count = method_desc->method.param_count; return_ACPI_STATUS(AE_OK); } /* * Special handling if the name was not found during the lookup - * some not_found cases are allowed */ if (status == AE_NOT_FOUND) { /* 1) not_found is ok during load pass 1/2 (allow forward references) */ if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) != 
ACPI_PARSE_EXECUTE) { status = AE_OK; } /* 2) not_found during a cond_ref_of(x) is ok by definition */ else if (walk_state->op->common.aml_opcode == AML_COND_REF_OF_OP) { status = AE_OK; } /* * 3) not_found while building a Package is ok at this point, we * may flag as an error later if slack mode is not enabled. * (Some ASL code depends on allowing this behavior) */ else if ((arg->common.parent) && ((arg->common.parent->common.aml_opcode == AML_PACKAGE_OP) || (arg->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP))) { status = AE_OK; } } /* Final exception check (may have been changed from code above) */ if (ACPI_FAILURE(status)) { ACPI_ERROR_NAMESPACE(path, status); if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) == ACPI_PARSE_EXECUTE) { /* Report a control method execution error */ status = acpi_ds_method_error(status, walk_state); } } /* Save the namepath */ arg->common.value.name = path; return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ps_get_next_simple_arg * * PARAMETERS: parser_state - Current parser state object * arg_type - The argument type (AML_*_ARG) * arg - Where the argument is returned * * RETURN: None * * DESCRIPTION: Get the next simple argument (constant, string, or namestring) * ******************************************************************************/ void acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state, u32 arg_type, union acpi_parse_object *arg) { u32 length; u16 opcode; u8 *aml = parser_state->aml; ACPI_FUNCTION_TRACE_U32(ps_get_next_simple_arg, arg_type); switch (arg_type) { case ARGP_BYTEDATA: /* Get 1 byte from the AML stream */ opcode = AML_BYTE_OP; arg->common.value.integer = (u64) *aml; length = 1; break; case ARGP_WORDDATA: /* Get 2 bytes from the AML stream */ opcode = AML_WORD_OP; ACPI_MOVE_16_TO_64(&arg->common.value.integer, aml); length = 2; break; case ARGP_DWORDDATA: /* Get 4 bytes from the AML stream */ opcode = 
AML_DWORD_OP; ACPI_MOVE_32_TO_64(&arg->common.value.integer, aml); length = 4; break; case ARGP_QWORDDATA: /* Get 8 bytes from the AML stream */ opcode = AML_QWORD_OP; ACPI_MOVE_64_TO_64(&arg->common.value.integer, aml); length = 8; break; case ARGP_CHARLIST: /* Get a pointer to the string, point past the string */ opcode = AML_STRING_OP; arg->common.value.string = ACPI_CAST_PTR(char, aml); /* Find the null terminator */ length = 0; while (aml[length]) { length++; } length++; break; case ARGP_NAME: case ARGP_NAMESTRING: acpi_ps_init_op(arg, AML_INT_NAMEPATH_OP); arg->common.value.name = acpi_ps_get_next_namestring(parser_state); return_VOID; default: ACPI_ERROR((AE_INFO, "Invalid ArgType 0x%X", arg_type)); return_VOID; } acpi_ps_init_op(arg, opcode); parser_state->aml += length; return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ps_get_next_field * * PARAMETERS: parser_state - Current parser state object * * RETURN: A newly allocated FIELD op * * DESCRIPTION: Get next field (named_field, reserved_field, or access_field) * ******************************************************************************/ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state *parser_state) { u32 aml_offset; union acpi_parse_object *field; union acpi_parse_object *arg = NULL; u16 opcode; u32 name; u8 access_type; u8 access_attribute; u8 access_length; u32 pkg_length; u8 *pkg_end; u32 buffer_length; ACPI_FUNCTION_TRACE(ps_get_next_field); aml_offset = (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start); /* Determine field type */ switch (ACPI_GET8(parser_state->aml)) { case AML_FIELD_OFFSET_OP: opcode = AML_INT_RESERVEDFIELD_OP; parser_state->aml++; break; case AML_FIELD_ACCESS_OP: opcode = AML_INT_ACCESSFIELD_OP; parser_state->aml++; break; case AML_FIELD_CONNECTION_OP: opcode = AML_INT_CONNECTION_OP; parser_state->aml++; break; case AML_FIELD_EXT_ACCESS_OP: opcode = 
AML_INT_EXTACCESSFIELD_OP; parser_state->aml++; break; default: opcode = AML_INT_NAMEDFIELD_OP; break; } /* Allocate a new field op */ field = acpi_ps_alloc_op(opcode); if (!field) { return_PTR(NULL); } field->common.aml_offset = aml_offset; /* Decode the field type */ switch (opcode) { case AML_INT_NAMEDFIELD_OP: /* Get the 4-character name */ ACPI_MOVE_32_TO_32(&name, parser_state->aml); acpi_ps_set_name(field, name); parser_state->aml += ACPI_NAME_SIZE; /* Get the length which is encoded as a package length */ field->common.value.size = acpi_ps_get_next_package_length(parser_state); break; case AML_INT_RESERVEDFIELD_OP: /* Get the length which is encoded as a package length */ field->common.value.size = acpi_ps_get_next_package_length(parser_state); break; case AML_INT_ACCESSFIELD_OP: case AML_INT_EXTACCESSFIELD_OP: /* * Get access_type and access_attrib and merge into the field Op * access_type is first operand, access_attribute is second. stuff * these bytes into the node integer value for convenience. */ /* Get the two bytes (Type/Attribute) */ access_type = ACPI_GET8(parser_state->aml); parser_state->aml++; access_attribute = ACPI_GET8(parser_state->aml); parser_state->aml++; field->common.value.integer = (u8)access_type; field->common.value.integer |= (u16)(access_attribute << 8); /* This opcode has a third byte, access_length */ if (opcode == AML_INT_EXTACCESSFIELD_OP) { access_length = ACPI_GET8(parser_state->aml); parser_state->aml++; field->common.value.integer |= (u32)(access_length << 16); } break; case AML_INT_CONNECTION_OP: /* * Argument for Connection operator can be either a Buffer * (resource descriptor), or a name_string. 
*/ if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) { parser_state->aml++; pkg_end = parser_state->aml; pkg_length = acpi_ps_get_next_package_length(parser_state); pkg_end += pkg_length; if (parser_state->aml < pkg_end) { /* Non-empty list */ arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP); if (!arg) { acpi_ps_free_op(field); return_PTR(NULL); } /* Get the actual buffer length argument */ opcode = ACPI_GET8(parser_state->aml); parser_state->aml++; switch (opcode) { case AML_BYTE_OP: /* AML_BYTEDATA_ARG */ buffer_length = ACPI_GET8(parser_state->aml); parser_state->aml += 1; break; case AML_WORD_OP: /* AML_WORDDATA_ARG */ buffer_length = ACPI_GET16(parser_state->aml); parser_state->aml += 2; break; case AML_DWORD_OP: /* AML_DWORDATA_ARG */ buffer_length = ACPI_GET32(parser_state->aml); parser_state->aml += 4; break; default: buffer_length = 0; break; } /* Fill in bytelist data */ arg->named.value.size = buffer_length; arg->named.data = parser_state->aml; } /* Skip to End of byte data */ parser_state->aml = pkg_end; } else { arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP); if (!arg) { acpi_ps_free_op(field); return_PTR(NULL); } /* Get the Namestring argument */ arg->common.value.name = acpi_ps_get_next_namestring(parser_state); } /* Link the buffer/namestring to parent (CONNECTION_OP) */ acpi_ps_append_arg(field, arg); break; default: /* Opcode was set in previous switch */ break; } return_PTR(field); } /******************************************************************************* * * FUNCTION: acpi_ps_get_next_arg * * PARAMETERS: walk_state - Current state * parser_state - Current parser state object * arg_type - The argument type (AML_*_ARG) * return_arg - Where the next arg is returned * * RETURN: Status, and an op object containing the next argument. 
 *
 * DESCRIPTION: Get next argument (including complex list arguments that require
 *              pushing the parser stack)
 *
 ******************************************************************************/

acpi_status
acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
		     struct acpi_parse_state *parser_state,
		     u32 arg_type, union acpi_parse_object **return_arg)
{
	union acpi_parse_object *arg = NULL;
	union acpi_parse_object *prev = NULL;
	union acpi_parse_object *field;
	u32 subop;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ps_get_next_arg, parser_state);

	/* Dispatch on the expected argument type from the opcode info table */

	switch (arg_type) {
	case ARGP_BYTEDATA:
	case ARGP_WORDDATA:
	case ARGP_DWORDDATA:
	case ARGP_CHARLIST:
	case ARGP_NAME:
	case ARGP_NAMESTRING:

		/* Constants, strings, and namestrings are all the same size */

		arg = acpi_ps_alloc_op(AML_BYTE_OP);
		if (!arg) {
			return_ACPI_STATUS(AE_NO_MEMORY);
		}
		/* Fills in Arg and advances parser_state->aml past the data */
		acpi_ps_get_next_simple_arg(parser_state, arg_type, arg);
		break;

	case ARGP_PKGLENGTH:

		/* Package length, nothing returned */

		parser_state->pkg_end =
		    acpi_ps_get_next_package_end(parser_state);
		break;

	case ARGP_FIELDLIST:

		if (parser_state->aml < parser_state->pkg_end) {

			/* Non-empty list */

			/* Consume field ops until the package end is reached */

			while (parser_state->aml < parser_state->pkg_end) {
				field = acpi_ps_get_next_field(parser_state);
				if (!field) {
					return_ACPI_STATUS(AE_NO_MEMORY);
				}

				/* Chain the fields; first one becomes the returned Arg */

				if (prev) {
					prev->common.next = field;
				} else {
					arg = field;
				}
				prev = field;
			}

			/* Skip to End of byte data */

			parser_state->aml = parser_state->pkg_end;
		}
		break;

	case ARGP_BYTELIST:

		if (parser_state->aml < parser_state->pkg_end) {

			/* Non-empty list */

			arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
			if (!arg) {
				return_ACPI_STATUS(AE_NO_MEMORY);
			}

			/* Fill in bytelist data */

			arg->common.value.size = (u32)
			    ACPI_PTR_DIFF(parser_state->pkg_end,
					  parser_state->aml);
			arg->named.data = parser_state->aml;

			/* Skip to End of byte data */

			parser_state->aml = parser_state->pkg_end;
		}
		break;

	case ARGP_TARGET:
	case ARGP_SUPERNAME:
	case ARGP_SIMPLENAME:

		subop = acpi_ps_peek_opcode(parser_state);
		if (subop == 0 ||
		    acpi_ps_is_leading_char(subop) ||
		    ACPI_IS_ROOT_PREFIX(subop) ||
		    ACPI_IS_PARENT_PREFIX(subop)) {

			/* null_name or name_string */

			arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
			if (!arg) {
				return_ACPI_STATUS(AE_NO_MEMORY);
			}

			/* To support super_name arg of Unload */

			if (walk_state->opcode == AML_UNLOAD_OP) {
				status =
				    acpi_ps_get_next_namepath(walk_state,
							      parser_state, arg,
							      1);

				/*
				 * If the super_name arg of Unload is a method call,
				 * we have restored the AML pointer, just free this Arg
				 */
				if (arg->common.aml_opcode ==
				    AML_INT_METHODCALL_OP) {
					acpi_ps_free_op(arg);
					arg = NULL;
				}
			} else {
				status =
				    acpi_ps_get_next_namepath(walk_state,
							      parser_state, arg,
							      0);
			}
		} else {
			/* Single complex argument, nothing returned */

			walk_state->arg_count = 1;
		}
		break;

	case ARGP_DATAOBJ:
	case ARGP_TERMARG:

		/* Single complex argument, nothing returned */

		walk_state->arg_count = 1;
		break;

	case ARGP_DATAOBJLIST:
	case ARGP_TERMLIST:
	case ARGP_OBJLIST:

		if (parser_state->aml < parser_state->pkg_end) {

			/* Non-empty list of variable arguments, nothing returned */

			walk_state->arg_count = ACPI_VAR_ARGS;
		}
		break;

	default:

		ACPI_ERROR((AE_INFO, "Invalid ArgType: 0x%X", arg_type));
		status = AE_AML_OPERAND_TYPE;
		break;
	}

	/* NOTE: *return_arg may legitimately be NULL (e.g. ARGP_PKGLENGTH) */

	*return_arg = arg;
	return_ACPI_STATUS(status);
}
gpl-2.0
nks15/nks_kernel_j7xeltektt
drivers/acpi/acpica/utxfmutex.c
600
6298
/*******************************************************************************
 *
 * Module Name: utxfmutex - external AML mutex access functions
 *
 ******************************************************************************/

/*
 * Copyright (C) 2000 - 2014, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utxfmutex")

/* Local prototypes */
static acpi_status
acpi_ut_get_mutex_object(acpi_handle handle,
			 acpi_string pathname,
			 union acpi_operand_object **ret_obj);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_mutex_object
 *
 * PARAMETERS:  handle              - Mutex or prefix handle (optional)
 *              pathname            - Mutex pathname (optional)
 *              ret_obj             - Where the mutex object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by
 *              Handle:Pathname. Either Handle or Pathname can be NULL, but
 *              not both.
 *
 ******************************************************************************/

static acpi_status
acpi_ut_get_mutex_object(acpi_handle handle,
			 acpi_string pathname,
			 union acpi_operand_object **ret_obj)
{
	struct acpi_namespace_node *mutex_node;
	union acpi_operand_object *mutex_obj;
	acpi_status status;

	/* Parameter validation: output pointer and at least one locator */

	if (!ret_obj || (!handle && !pathname)) {
		return (AE_BAD_PARAMETER);
	}

	/* Get the namespace node for the mutex */

	mutex_node = handle;
	if (pathname != NULL) {
		/* Pathname (possibly relative to Handle) overrides Handle */
		status = acpi_get_handle(handle, pathname,
					 ACPI_CAST_PTR(acpi_handle,
						       &mutex_node));
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/* Ensure that we actually have a Mutex object */

	if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) {
		return (AE_TYPE);
	}

	/* Get the low-level mutex object attached to the namespace node */

	mutex_obj = acpi_ns_get_attached_object(mutex_node);
	if (!mutex_obj) {
		return (AE_NULL_OBJECT);
	}

	*ret_obj = mutex_obj;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_acquire_mutex
 *
 * PARAMETERS:  handle              - Mutex or prefix handle (optional)
 *              pathname            - Mutex pathname (optional)
 *              timeout             - Max time to wait for the lock (millisec)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to
 *              AML mutex objects, and allows for transaction locking between
 *              drivers and AML code. The mutex node is pointed to by
 *              Handle:Pathname. Either Handle or Pathname can be NULL, but
 *              not both.
 *
 ******************************************************************************/

acpi_status
acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
{
	acpi_status status;
	union acpi_operand_object *mutex_obj;

	/* Get the low-level mutex associated with Handle:Pathname */

	status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Acquire the OS mutex backing the AML mutex object */

	status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout);
	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_release_mutex
 *
 * PARAMETERS:  handle              - Mutex or prefix handle (optional)
 *              pathname            - Mutex pathname (optional)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Release an AML mutex. This is a device driver interface to
 *              AML mutex objects, and allows for transaction locking between
 *              drivers and AML code. The mutex node is pointed to by
 *              Handle:Pathname. Either Handle or Pathname can be NULL, but
 *              not both.
 *
 ******************************************************************************/

acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
{
	acpi_status status;
	union acpi_operand_object *mutex_obj;

	/* Get the low-level mutex associated with Handle:Pathname */

	status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Release the OS mutex; caller must currently own it */

	acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
	return (AE_OK);
}
gpl-2.0
wangxingchao/oriole
kernel/trace/trace_sysprof.c
856
6658
/*
 * trace stack traces
 *
 * Copyright (C) 2004-2008, Soeren Sandmann
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/fs.h>

#include <asm/stacktrace.h>

#include "trace.h"

static struct trace_array	*sysprof_trace;
static int __read_mostly	tracer_enabled;

/*
 * 1 msec sample interval by default:
 */
static unsigned long sample_period = 1000000;
/* Maximum number of stack frames recorded per sample */
static const unsigned int sample_max_depth = 512;

/* Serializes starting/stopping the per-CPU sample timers */
static DEFINE_MUTEX(sample_timer_lock);
/*
 * Per CPU hrtimers that do the profiling:
 */
static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

/* One saved frame of a user-space frame-pointer chain */
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

/*
 * Safely copy one stack frame from user space while in atomic context.
 * Returns 1 on success, 0 if the address is invalid or faults.
 */
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	/* In hrtimer/IRQ context: must not sleep on a page fault */
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

/* State threaded through the dump_trace() callbacks below */
struct backtrace_info {
	struct trace_array_cpu	*data;
	struct trace_array	*tr;
	int			pos;
};

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

/* Record one reliable kernel return address as a trace entry */
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct backtrace_info *info = data;

	if (info->pos < sample_max_depth && reliable) {
		__trace_special(info->tr, info->data, 1, addr, 0);

		info->pos++;
	}
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
	.walk_stack		= print_context_stack,
};

/*
 * Walk the kernel stack of the interrupted context and record it.
 * Returns the number of entries recorded (including the initial ip).
 */
static int
trace_kernel(struct pt_regs *regs, struct trace_array *tr,
	     struct trace_array_cpu *data)
{
	struct backtrace_info info;
	unsigned long bp;
	char *stack;

	info.tr = tr;
	info.data = data;
	info.pos = 1;

	/* Entry type 1 with the interrupted instruction pointer first */
	__trace_special(info.tr, info.data, 1, regs->ip, 0);

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = regs->bp;
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);

	return info.pos;
}

/*
 * Per-sample worker: record kernel and/or user stack of the current
 * task on this CPU. Called from the hrtimer in hard-IRQ context.
 */
static void timer_notify(struct pt_regs *regs, int cpu)
{
	struct trace_array_cpu *data;
	struct stack_frame frame;
	struct trace_array *tr;
	const void __user *fp;
	int is_user;
	int i;

	if (!regs)
		return;

	tr = sysprof_trace;
	data = tr->data[cpu];
	is_user = user_mode(regs);

	/* Skip the idle task (pid 0) */
	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	/* Entry type 0 marks the start of a sample for this pid */
	__trace_special(tr, data, 0, 0, current->pid);

	if (!is_user)
		i = trace_kernel(regs, tr, data);
	else
		i = 0;

	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm && i < sample_max_depth) {
		/* User pt_regs live just below the kernel stack top (sp0) */
		regs = (struct pt_regs *)current->thread.sp0 - 1;

		fp = (void __user *)regs->bp;

		__trace_special(tr, data, 2, regs->ip, 0);

		/* Follow the user frame-pointer chain upward */
		while (i < sample_max_depth) {
			frame.next_fp = NULL;
			frame.return_address = 0;
			if (!copy_stack_frame(fp, &frame))
				break;
			/* Frame pointers must grow toward higher addresses */
			if ((unsigned long)fp < regs->sp)
				break;

			__trace_special(tr, data, 2, frame.return_address,
					(unsigned long)fp);
			fp = frame.next_fp;

			i++;
		}
	}

	/*
	 * Special trace entry if we overflow the max depth:
	 */
	if (i == sample_max_depth)
		__trace_special(tr, data, -1, -1, -1);

	/* Entry type 3 closes the sample with the recorded depth */
	__trace_special(tr, data, 3, current->pid, i);
}

static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
{
	/* trace here */
	timer_notify(get_irq_regs(), smp_processor_id());

	/* Re-arm for the next sample period */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	return HRTIMER_RESTART;
}

/* IPI callback: arm this CPU's sampling hrtimer (pinned to the CPU) */
static void start_stack_timer(void *unused)
{
	struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = stack_trace_timer_fn;

	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);
}

static void start_stack_timers(void)
{
	on_each_cpu(start_stack_timer, NULL, 1);
}

static void stop_stack_timer(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);

	hrtimer_cancel(hrtimer);
}

static void stop_stack_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		stop_stack_timer(cpu);
}

static void stop_stack_trace(struct trace_array *tr)
{
	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	tracer_enabled = 0;
	mutex_unlock(&sample_timer_lock);
}

/* Tracer "init" hook: start sampling on all CPUs */
static int stack_trace_init(struct trace_array *tr)
{
	sysprof_trace = tr;

	tracing_start_cmdline_record();

	mutex_lock(&sample_timer_lock);
	start_stack_timers();
	tracer_enabled = 1;
	mutex_unlock(&sample_timer_lock);
	return 0;
}

/* Tracer "reset" hook: stop sampling and cmdline recording */
static void stack_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	stop_stack_trace(tr);
}

static struct tracer stack_trace __read_mostly =
{
	.name		= "sysprof",
	.init		= stack_trace_init,
	.reset		= stack_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sysprof,
#endif
};

__init static int init_stack_trace(void)
{
	return register_tracer(&stack_trace);
}
device_initcall(init_stack_trace);

/* Enough for a decimal long long plus sign, newline, and NUL */
#define MAX_LONG_DIGITS 22

/* debugfs read: report the sample period in microseconds */
static ssize_t
sysprof_sample_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	int r;

	r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

/* debugfs write: set a new sample period (input in usecs, min 100) */
static ssize_t
sysprof_sample_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[MAX_LONG_DIGITS];
	unsigned long val;

	if (cnt > MAX_LONG_DIGITS-1)
		cnt = MAX_LONG_DIGITS-1;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);
	/*
	 * Enforce a minimum sample period of 100 usecs:
	 */
	if (val < 100)
		val = 100;

	/* Restart all timers so the new period takes effect atomically */
	mutex_lock(&sample_timer_lock);
	stop_stack_timers();
	sample_period = val * 1000;
	start_stack_timers();
	mutex_unlock(&sample_timer_lock);

	return cnt;
}

static const struct file_operations sysprof_sample_fops = {
	.read		= sysprof_sample_read,
	.write		= sysprof_sample_write,
};

void init_tracer_sysprof_debugfs(struct dentry *d_tracer)
{

	trace_create_file("sysprof_sample_period", 0644,
			d_tracer, NULL, &sysprof_sample_fops);
}
gpl-2.0
lollipop-og/kernel_msm
sound/soc/s6000/s6000-pcm.c
856
14165
/* * ALSA PCM interface for the Stetch s6000 family * * Author: Daniel Gloeckner, <dg@emlix.com> * Copyright: (C) 2009 emlix GmbH <info@emlix.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/dma.h> #include <variant/dmac.h> #include "s6000-pcm.h" #define S6_PCM_PREALLOCATE_SIZE (96 * 1024) #define S6_PCM_PREALLOCATE_MAX (2048 * 1024) static struct snd_pcm_hardware s6000_pcm_hardware = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_JOINT_DUPLEX), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE), .rates = (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_5512 | \ SNDRV_PCM_RATE_8000_192000), .rate_min = 0, .rate_max = 1562500, .channels_min = 2, .channels_max = 8, .buffer_bytes_max = 0x7ffffff0, .period_bytes_min = 16, .period_bytes_max = 0xfffff0, .periods_min = 2, .periods_max = 1024, /* no limit */ .fifo_size = 0, }; struct s6000_runtime_data { spinlock_t lock; int period; /* current DMA period */ }; static void s6000_pcm_enqueue_dma(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct s6000_runtime_data *prtd = runtime->private_data; struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct s6000_pcm_dma_params *par; int channel; unsigned int period_size; unsigned int dma_offset; dma_addr_t dma_pos; dma_addr_t src, dst; par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); period_size = snd_pcm_lib_period_bytes(substream); dma_offset = prtd->period * 
period_size; dma_pos = runtime->dma_addr + dma_offset; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { src = dma_pos; dst = par->sif_out; channel = par->dma_out; } else { src = par->sif_in; dst = dma_pos; channel = par->dma_in; } if (!s6dmac_channel_enabled(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel))) return; if (s6dmac_fifo_full(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel))) { printk(KERN_ERR "s6000-pcm: fifo full\n"); return; } BUG_ON(period_size & 15); s6dmac_put_fifo(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel), src, dst, period_size); prtd->period++; if (unlikely(prtd->period >= runtime->periods)) prtd->period = 0; } static irqreturn_t s6000_pcm_irq(int irq, void *data) { struct snd_pcm *pcm = data; struct snd_soc_pcm_runtime *runtime = pcm->private_data; struct s6000_runtime_data *prtd; unsigned int has_xrun; int i, ret = IRQ_NONE; for (i = 0; i < 2; ++i) { struct snd_pcm_substream *substream = pcm->streams[i].substream; struct s6000_pcm_dma_params *params = snd_soc_dai_get_dma_data(runtime->cpu_dai, substream); u32 channel; unsigned int pending; if (substream == SNDRV_PCM_STREAM_PLAYBACK) channel = params->dma_out; else channel = params->dma_in; has_xrun = params->check_xrun(runtime->cpu_dai); if (!channel) continue; if (unlikely(has_xrun & (1 << i)) && substream->runtime && snd_pcm_running(substream)) { dev_dbg(pcm->dev, "xrun\n"); snd_pcm_stream_lock(substream); snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); snd_pcm_stream_unlock(substream); ret = IRQ_HANDLED; } pending = s6dmac_int_sources(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel)); if (pending & 1) { ret = IRQ_HANDLED; if (likely(substream->runtime && snd_pcm_running(substream))) { snd_pcm_period_elapsed(substream); dev_dbg(pcm->dev, "period elapsed %x %x\n", s6dmac_cur_src(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel)), s6dmac_cur_dst(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel))); prtd = substream->runtime->private_data; spin_lock(&prtd->lock); 
s6000_pcm_enqueue_dma(substream); spin_unlock(&prtd->lock); } } if (unlikely(pending & ~7)) { if (pending & (1 << 3)) printk(KERN_WARNING "s6000-pcm: DMA %x Underflow\n", channel); if (pending & (1 << 4)) printk(KERN_WARNING "s6000-pcm: DMA %x Overflow\n", channel); if (pending & 0x1e0) printk(KERN_WARNING "s6000-pcm: DMA %x Master Error " "(mask %x)\n", channel, pending >> 5); } } return ret; } static int s6000_pcm_start(struct snd_pcm_substream *substream) { struct s6000_runtime_data *prtd = substream->runtime->private_data; struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct s6000_pcm_dma_params *par; unsigned long flags; int srcinc; u32 dma; par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); spin_lock_irqsave(&prtd->lock, flags); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { srcinc = 1; dma = par->dma_out; } else { srcinc = 0; dma = par->dma_in; } s6dmac_enable_chan(DMA_MASK_DMAC(dma), DMA_INDEX_CHNL(dma), 1 /* priority 1 (0 is max) */, 0 /* peripheral requests w/o xfer length mode */, srcinc /* source address increment */, srcinc^1 /* destination address increment */, 0 /* chunksize 0 (skip impossible on this dma) */, 0 /* source skip after chunk (impossible) */, 0 /* destination skip after chunk (impossible) */, 4 /* 16 byte burst size */, -1 /* don't conserve bandwidth */, 0 /* low watermark irq descriptor threshold */, 0 /* disable hardware timestamps */, 1 /* enable channel */); s6000_pcm_enqueue_dma(substream); s6000_pcm_enqueue_dma(substream); spin_unlock_irqrestore(&prtd->lock, flags); return 0; } static int s6000_pcm_stop(struct snd_pcm_substream *substream) { struct s6000_runtime_data *prtd = substream->runtime->private_data; struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct s6000_pcm_dma_params *par; unsigned long flags; u32 channel; par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) channel = par->dma_out; else 
channel = par->dma_in; s6dmac_set_terminal_count(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel), 0); spin_lock_irqsave(&prtd->lock, flags); s6dmac_disable_chan(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel)); spin_unlock_irqrestore(&prtd->lock, flags); return 0; } static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct s6000_pcm_dma_params *par; int ret; par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); ret = par->trigger(substream, cmd, 0); if (ret < 0) return ret; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ret = s6000_pcm_start(substream); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ret = s6000_pcm_stop(substream); break; default: ret = -EINVAL; } if (ret < 0) return ret; return par->trigger(substream, cmd, 1); } static int s6000_pcm_prepare(struct snd_pcm_substream *substream) { struct s6000_runtime_data *prtd = substream->runtime->private_data; prtd->period = 0; return 0; } static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct s6000_pcm_dma_params *par; struct snd_pcm_runtime *runtime = substream->runtime; struct s6000_runtime_data *prtd = runtime->private_data; unsigned long flags; unsigned int offset; dma_addr_t count; par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); spin_lock_irqsave(&prtd->lock, flags); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) count = s6dmac_cur_src(DMA_MASK_DMAC(par->dma_out), DMA_INDEX_CHNL(par->dma_out)); else count = s6dmac_cur_dst(DMA_MASK_DMAC(par->dma_in), DMA_INDEX_CHNL(par->dma_in)); count -= runtime->dma_addr; spin_unlock_irqrestore(&prtd->lock, flags); offset = bytes_to_frames(runtime, count); if (unlikely(offset >= runtime->buffer_size)) offset = 0; return offset; 
} static int s6000_pcm_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct s6000_pcm_dma_params *par; struct snd_pcm_runtime *runtime = substream->runtime; struct s6000_runtime_data *prtd; int ret; par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware); ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16); if (ret < 0) return ret; ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16); if (ret < 0) return ret; ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) return ret; if (par->same_rate) { int rate; spin_lock(&par->lock); /* needed? */ rate = par->rate; spin_unlock(&par->lock); if (rate != -1) { ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE, rate, rate); if (ret < 0) return ret; } } prtd = kzalloc(sizeof(struct s6000_runtime_data), GFP_KERNEL); if (prtd == NULL) return -ENOMEM; spin_lock_init(&prtd->lock); runtime->private_data = prtd; return 0; } static int s6000_pcm_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct s6000_runtime_data *prtd = runtime->private_data; kfree(prtd); return 0; } static int s6000_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct s6000_pcm_dma_params *par; int ret; ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (ret < 0) { printk(KERN_WARNING "s6000-pcm: allocation of memory failed\n"); return ret; } par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); if (par->same_rate) { spin_lock(&par->lock); if (par->rate == -1 || !(par->in_use & ~(1 << substream->stream))) { par->rate = params_rate(hw_params); par->in_use |= 1 << substream->stream; } else if (params_rate(hw_params) != 
par->rate) { snd_pcm_lib_free_pages(substream); par->in_use &= ~(1 << substream->stream); ret = -EBUSY; } spin_unlock(&par->lock); } return ret; } static int s6000_pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *soc_runtime = substream->private_data; struct s6000_pcm_dma_params *par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream); spin_lock(&par->lock); par->in_use &= ~(1 << substream->stream); if (!par->in_use) par->rate = -1; spin_unlock(&par->lock); return snd_pcm_lib_free_pages(substream); } static struct snd_pcm_ops s6000_pcm_ops = { .open = s6000_pcm_open, .close = s6000_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = s6000_pcm_hw_params, .hw_free = s6000_pcm_hw_free, .trigger = s6000_pcm_trigger, .prepare = s6000_pcm_prepare, .pointer = s6000_pcm_pointer, }; static void s6000_pcm_free(struct snd_pcm *pcm) { struct snd_soc_pcm_runtime *runtime = pcm->private_data; struct s6000_pcm_dma_params *params = snd_soc_dai_get_dma_data(runtime->cpu_dai, pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream); free_irq(params->irq, pcm); snd_pcm_lib_preallocate_free_for_all(pcm); } static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32); static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime) { struct snd_card *card = runtime->card->snd_card; struct snd_pcm *pcm = runtime->pcm; struct s6000_pcm_dma_params *params; int res; params = snd_soc_dai_get_dma_data(runtime->cpu_dai, pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream); if (!card->dev->dma_mask) card->dev->dma_mask = &s6000_pcm_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); if (params->dma_in) { s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in), DMA_INDEX_CHNL(params->dma_in)); s6dmac_int_sources(DMA_MASK_DMAC(params->dma_in), DMA_INDEX_CHNL(params->dma_in)); } if (params->dma_out) { s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_out), DMA_INDEX_CHNL(params->dma_out)); s6dmac_int_sources(DMA_MASK_DMAC(params->dma_out), 
DMA_INDEX_CHNL(params->dma_out)); } res = request_irq(params->irq, s6000_pcm_irq, IRQF_SHARED, "s6000-audio", pcm); if (res) { printk(KERN_ERR "s6000-pcm couldn't get IRQ\n"); return res; } res = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, card->dev, S6_PCM_PREALLOCATE_SIZE, S6_PCM_PREALLOCATE_MAX); if (res) printk(KERN_WARNING "s6000-pcm: preallocation failed\n"); spin_lock_init(&params->lock); params->in_use = 0; params->rate = -1; return 0; } static struct snd_soc_platform_driver s6000_soc_platform = { .ops = &s6000_pcm_ops, .pcm_new = s6000_pcm_new, .pcm_free = s6000_pcm_free, }; static int __devinit s6000_soc_platform_probe(struct platform_device *pdev) { return snd_soc_register_platform(&pdev->dev, &s6000_soc_platform); } static int __devexit s6000_soc_platform_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver s6000_pcm_driver = { .driver = { .name = "s6000-pcm-audio", .owner = THIS_MODULE, }, .probe = s6000_soc_platform_probe, .remove = __devexit_p(s6000_soc_platform_remove), }; module_platform_driver(s6000_pcm_driver); MODULE_AUTHOR("Daniel Gloeckner"); MODULE_DESCRIPTION("Stretch s6000 family PCM DMA module"); MODULE_LICENSE("GPL");
gpl-2.0
ThinkingBridge/platform_kernel_lge_hammerhead
sound/core/vmaster.c
1624
12469
/*
 * Virtual master and slave controls
 *
 * Copyright (c) 2008 by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/tlv.h>

/*
 * a subset of information returned via ctl info callback
 */
struct link_ctl_info {
	snd_ctl_elem_type_t type; /* value type */
	int count;		/* item count */
	int min_val, max_val;	/* min, max values */
};

/*
 * link master - this contains a list of slave controls that are
 * identical types, i.e. info returns the same value type and value
 * ranges, but may have different number of counts.
 *
 * The master control is so far only mono volume/switch for simplicity.
 * The same value will be applied to all slaves.
 */
struct link_master {
	struct list_head slaves;	/* list of struct link_slave */
	struct link_ctl_info info;
	int val;			/* the master value */
	unsigned int tlv[4];		/* constant dB-scale TLV, if given */
	void (*hook)(void *private_data, int); /* change notification hook */
	void *hook_private_data;
};

/*
 * link slave - this contains a slave control element
 *
 * It fakes the control callbacks with additional attenuation by the
 * master control.  A slave may have either one or two channels.
 */
struct link_slave {
	struct list_head list;
	struct link_master *master;
	struct link_ctl_info info;
	int vals[2];		/* current values */
	unsigned int flags;
	struct snd_kcontrol *kctl; /* original kcontrol pointer */
	struct snd_kcontrol slave; /* the copy of original control entry */
};

/*
 * Refresh the cached channel values from the slave's own get callback.
 *
 * NOTE(fix): the error code of the get callback was silently ignored
 * before; propagate it so a failing slave does not leave stale values.
 */
static int slave_update(struct link_slave *slave)
{
	struct snd_ctl_elem_value *uctl;
	int err, ch;

	uctl = kmalloc(sizeof(*uctl), GFP_KERNEL);
	if (!uctl)
		return -ENOMEM;
	uctl->id = slave->slave.id;
	err = slave->slave.get(&slave->slave, uctl);
	if (err < 0)
		goto error;
	for (ch = 0; ch < slave->info.count; ch++)
		slave->vals[ch] = uctl->value.integer.value[ch];
 error:
	kfree(uctl);
	return err < 0 ? err : 0;
}

/* get the slave ctl info and save the initial values */
static int slave_init(struct link_slave *slave)
{
	struct snd_ctl_elem_info *uinfo;
	int err;

	if (slave->info.count) {
		/* already initialized */
		if (slave->flags & SND_CTL_SLAVE_NEED_UPDATE)
			return slave_update(slave);
		return 0;
	}

	uinfo = kmalloc(sizeof(*uinfo), GFP_KERNEL);
	if (!uinfo)
		return -ENOMEM;
	uinfo->id = slave->slave.id;
	err = slave->slave.info(&slave->slave, uinfo);
	if (err < 0) {
		kfree(uinfo);
		return err;
	}
	slave->info.type = uinfo->type;
	slave->info.count = uinfo->count;
	/* only mono/stereo integer or boolean slaves are supported */
	if (slave->info.count > 2  ||
	    (slave->info.type != SNDRV_CTL_ELEM_TYPE_INTEGER &&
	     slave->info.type != SNDRV_CTL_ELEM_TYPE_BOOLEAN)) {
		snd_printk(KERN_ERR "invalid slave element\n");
		kfree(uinfo);
		return -EINVAL;
	}
	slave->info.min_val = uinfo->value.integer.min;
	slave->info.max_val = uinfo->value.integer.max;
	kfree(uinfo);

	return slave_update(slave);
}

/* initialize master volume: copy the type/range from the first
 * initializable slave; the master is always mono and defaults to
 * full volume (no attenuation)
 */
static int master_init(struct link_master *master)
{
	struct link_slave *slave;

	if (master->info.count)
		return 0; /* already initialized */

	list_for_each_entry(slave, &master->slaves, list) {
		int err = slave_init(slave);
		if (err < 0)
			return err;
		master->info = slave->info;
		master->info.count = 1; /* always mono */
		/* set full volume as default (= no attenuation) */
		master->val = master->info.max_val;
		if (master->hook)
			master->hook(master->hook_private_data, master->val);
		return 1;
	}
	return -ENOENT;
}

/* return the cached (unattenuated) slave values to the caller */
static int slave_get_val(struct link_slave *slave,
			 struct snd_ctl_elem_value *ucontrol)
{
	int err, ch;

	err = slave_init(slave);
	if (err < 0)
		return err;
	for (ch = 0; ch < slave->info.count; ch++)
		ucontrol->value.integer.value[ch] = slave->vals[ch];
	return 0;
}

/* apply the master attenuation to the given values and forward them
 * to the slave's own put callback
 */
static int slave_put_val(struct link_slave *slave,
			 struct snd_ctl_elem_value *ucontrol)
{
	int err, ch, vol;

	err = master_init(slave->master);
	if (err < 0)
		return err;

	switch (slave->info.type) {
	case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
		/* master switch gates the slave switch */
		for (ch = 0; ch < slave->info.count; ch++)
			ucontrol->value.integer.value[ch] &=
				!!slave->master->val;
		break;
	case SNDRV_CTL_ELEM_TYPE_INTEGER:
		for (ch = 0; ch < slave->info.count; ch++) {
			/* max master volume is supposed to be 0 dB */
			vol = ucontrol->value.integer.value[ch];
			vol += slave->master->val - slave->master->info.max_val;
			if (vol < slave->info.min_val)
				vol = slave->info.min_val;
			else if (vol > slave->info.max_val)
				vol = slave->info.max_val;
			ucontrol->value.integer.value[ch] = vol;
		}
		break;
	}
	return slave->slave.put(&slave->slave, ucontrol);
}

/*
 * ctl callbacks for slaves
 */
static int slave_info(struct snd_kcontrol *kcontrol,
		      struct snd_ctl_elem_info *uinfo)
{
	struct link_slave *slave = snd_kcontrol_chip(kcontrol);
	return slave->slave.info(&slave->slave, uinfo);
}

static int slave_get(struct snd_kcontrol *kcontrol,
		     struct snd_ctl_elem_value *ucontrol)
{
	struct link_slave *slave = snd_kcontrol_chip(kcontrol);
	return slave_get_val(slave, ucontrol);
}

static int slave_put(struct snd_kcontrol *kcontrol,
		     struct snd_ctl_elem_value *ucontrol)
{
	struct link_slave *slave = snd_kcontrol_chip(kcontrol);
	int err, ch, changed = 0;

	err = slave_init(slave);
	if (err < 0)
		return err;
	for (ch = 0; ch < slave->info.count; ch++) {
		if (slave->vals[ch] != ucontrol->value.integer.value[ch]) {
			changed = 1;
			slave->vals[ch] = ucontrol->value.integer.value[ch];
		}
	}
	if (!changed)
		return 0;
	err = slave_put_val(slave, ucontrol);
	if (err < 0)
		return err;
	return 1;
}

static int slave_tlv_cmd(struct snd_kcontrol *kcontrol,
			 int op_flag, unsigned int size,
			 unsigned int __user *tlv)
{
	struct link_slave *slave = snd_kcontrol_chip(kcontrol);
	/* FIXME: this assumes that the max volume is 0 dB */
	return slave->slave.tlv.c(&slave->slave, op_flag, size, tlv);
}

/* private_free of the overridden slave kcontrol: chain to the original
 * private_free and unlink from the master's slave list
 */
static void slave_free(struct snd_kcontrol *kcontrol)
{
	struct link_slave *slave = snd_kcontrol_chip(kcontrol);
	if (slave->slave.private_free)
		slave->slave.private_free(&slave->slave);
	if (slave->master)
		list_del(&slave->list);
	kfree(slave);
}

/*
 * Add a slave control to the group with the given master control
 *
 * All slaves must be the same type (returning the same information
 * via info callback).  The function doesn't check it, so it's your
 * responsibility.
 *
 * Also, some additional limitations:
 * - at most two channels
 * - logarithmic volume control (dB level), no linear volume
 * - master can only attenuate the volume, no gain
 */
int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave,
		       unsigned int flags)
{
	struct link_master *master_link = snd_kcontrol_chip(master);
	struct link_slave *srec;

	/* the flexible vd[] tail of the copied kcontrol needs extra room */
	srec = kzalloc(sizeof(*srec) +
		       slave->count * sizeof(*slave->vd), GFP_KERNEL);
	if (!srec)
		return -ENOMEM;
	srec->kctl = slave;
	srec->slave = *slave;
	memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd));
	srec->master = master_link;
	srec->flags = flags;

	/* override callbacks */
	slave->info = slave_info;
	slave->get = slave_get;
	slave->put = slave_put;
	if (slave->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK)
		slave->tlv.c = slave_tlv_cmd;
	slave->private_data = srec;
	slave->private_free = slave_free;

	list_add_tail(&srec->list, &master_link->slaves);
	return 0;
}
EXPORT_SYMBOL(_snd_ctl_add_slave);

/*
 * ctl callbacks for master controls
 */
static int master_info(struct snd_kcontrol *kcontrol,
		      struct snd_ctl_elem_info *uinfo)
{
	struct link_master *master = snd_kcontrol_chip(kcontrol);
	int ret;

	ret = master_init(master);
	if (ret < 0)
		return ret;
	uinfo->type = master->info.type;
	uinfo->count = master->info.count;
	uinfo->value.integer.min = master->info.min_val;
	uinfo->value.integer.max = master->info.max_val;
	return 0;
}

static int master_get(struct snd_kcontrol *kcontrol,
		      struct snd_ctl_elem_value *ucontrol)
{
	struct link_master *master = snd_kcontrol_chip(kcontrol);
	int err = master_init(master);
	if (err < 0)
		return err;
	ucontrol->value.integer.value[0] = master->val;
	return 0;
}

static int master_put(struct snd_kcontrol *kcontrol,
		      struct snd_ctl_elem_value *ucontrol)
{
	struct link_master *master = snd_kcontrol_chip(kcontrol);
	struct link_slave *slave;
	struct snd_ctl_elem_value *uval;
	int err, old_val;

	err = master_init(master);
	if (err < 0)
		return err;
	old_val = master->val;
	if (ucontrol->value.integer.value[0] == old_val)
		return 0;

	uval = kmalloc(sizeof(*uval), GFP_KERNEL);
	if (!uval)
		return -ENOMEM;
	list_for_each_entry(slave, &master->slaves, list) {
		/* reread the raw slave value under the old master value,
		 * then re-apply it with the new master attenuation
		 */
		master->val = old_val;
		uval->id = slave->slave.id;
		slave_get_val(slave, uval);
		master->val = ucontrol->value.integer.value[0];
		slave_put_val(slave, uval);
	}
	kfree(uval);
	if (master->hook && !err)
		master->hook(master->hook_private_data, master->val);
	return 1;
}

static void master_free(struct snd_kcontrol *kcontrol)
{
	struct link_master *master = snd_kcontrol_chip(kcontrol);
	struct link_slave *slave, *n;

	/* free all slave links and restore the original slave kctls */
	list_for_each_entry_safe(slave, n, &master->slaves, list) {
		struct snd_kcontrol *sctl = slave->kctl;
		struct list_head olist = sctl->list;
		memcpy(sctl, &slave->slave, sizeof(*sctl));
		memcpy(sctl->vd, slave->slave.vd,
		       sctl->count * sizeof(*sctl->vd));
		sctl->list = olist; /* keep the current linked-list */
		kfree(slave);
	}
	kfree(master);
}


/**
 * snd_ctl_make_virtual_master - Create a virtual master control
 * @name: name string of the control element to create
 * @tlv: optional TLV int array for dB information
 *
 * Creates a virtual master control with the given name string.
 * Returns the created control element, or NULL for errors (ENOMEM).
 *
 * After creating a vmaster element, you can add the slave controls
 * via snd_ctl_add_slave() or snd_ctl_add_slave_uncached().
 *
 * The optional argument @tlv can be used to specify the TLV information
 * for dB scale of the master control.  It should be a single element
 * with #SNDRV_CTL_TLVT_DB_SCALE, #SNDRV_CTL_TLV_DB_MINMAX or
 * #SNDRV_CTL_TLVT_DB_MINMAX_MUTE type, and should be the max 0dB.
 */
struct snd_kcontrol *snd_ctl_make_virtual_master(char *name,
						 const unsigned int *tlv)
{
	struct link_master *master;
	struct snd_kcontrol *kctl;
	struct snd_kcontrol_new knew;

	memset(&knew, 0, sizeof(knew));
	knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	knew.name = name;
	knew.info = master_info;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;
	INIT_LIST_HEAD(&master->slaves);

	kctl = snd_ctl_new1(&knew, master);
	if (!kctl) {
		kfree(master);
		return NULL;
	}
	/* override some callbacks */
	kctl->info = master_info;
	kctl->get = master_get;
	kctl->put = master_put;
	kctl->private_free = master_free;

	/* additional (constant) TLV read */
	if (tlv &&
	    (tlv[0] == SNDRV_CTL_TLVT_DB_SCALE ||
	     tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX ||
	     tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX_MUTE)) {
		kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
		memcpy(master->tlv, tlv, sizeof(master->tlv));
		kctl->tlv.p = master->tlv;
	}

	return kctl;
}
EXPORT_SYMBOL(snd_ctl_make_virtual_master);

/**
 * snd_ctl_add_vmaster_hook - Add a hook to a vmaster control
 * @kcontrol: vmaster kctl element
 * @hook: the hook function
 * @private_data: the private_data pointer to be saved
 *
 * Adds the given hook to the vmaster control element so that it's called
 * at each time when the value is changed.
 */
int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kcontrol,
			     void (*hook)(void *private_data, int),
			     void *private_data)
{
	struct link_master *master = snd_kcontrol_chip(kcontrol);
	master->hook = hook;
	master->hook_private_data = private_data;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_ctl_add_vmaster_hook);

/**
 * snd_ctl_sync_vmaster_hook - Sync the vmaster hook
 * @kcontrol: vmaster kctl element
 *
 * Call the hook function to synchronize with the current value of the given
 * vmaster element.  NOP when NULL is passed to @kcontrol or the hook doesn't
 * exist.
 */
void snd_ctl_sync_vmaster_hook(struct snd_kcontrol *kcontrol)
{
	struct link_master *master;
	if (!kcontrol)
		return;
	master = snd_kcontrol_chip(kcontrol);
	if (master->hook)
		master->hook(master->hook_private_data, master->val);
}
EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster_hook);
gpl-2.0
NamelessRom/android_kernel_oneplus_msm8994
drivers/media/usb/usbvision/usbvision-video.c
1880
48563
/* * USB USBVISION Video device driver 0.9.10 * * * * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de> * * This module is part of usbvision driver project. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Let's call the version 0.... until compression decoding is completely * implemented. * * This driver is written by Jose Ignacio Gijon and Joerg Heckenbach. * It was based on USB CPiA driver written by Peter Pregler, * Scott J. Bertin and Johannes Erdfelt * Ideas are taken from bttv driver by Ralph Metzler, Marcus Metzler & * Gerd Knorr and zoran 36120/36125 driver by Pauline Middelink * Updates to driver completed by Dwaine P. Garden * * * TODO: * - use submit_urb for all setup packets * - Fix memory settings for nt1004. It is 4 times as big as the * nt1003 memory. * - Add audio on endpoint 3 for nt1004 chip. * Seems impossible, needs a codec interface. Which one? * - Clean up the driver. * - optimization for performance. * - Add Videotext capability (VBI). Working on it..... 
* - Check audio for other devices * */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/saa7115.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/tuner.h> #include <linux/workqueue.h> #include "usbvision.h" #include "usbvision-cards.h" #define DRIVER_AUTHOR \ "Joerg Heckenbach <joerg@heckenbach-aw.de>, " \ "Dwaine Garden <DwaineGarden@rogers.com>" #define DRIVER_NAME "usbvision" #define DRIVER_ALIAS "USBVision" #define DRIVER_DESC "USBVision USB Video Device Driver for Linux" #define DRIVER_LICENSE "GPL" #define USBVISION_VERSION_STRING "0.9.11" #define ENABLE_HEXDUMP 0 /* Enable if you need it */ #ifdef USBVISION_DEBUG #define PDEBUG(level, fmt, args...) { \ if (video_debug & (level)) \ printk(KERN_INFO KBUILD_MODNAME ":[%s:%d] " fmt, \ __func__, __LINE__ , ## args); \ } #else #define PDEBUG(level, fmt, args...) do {} while (0) #endif #define DBG_IO (1 << 1) #define DBG_PROBE (1 << 2) #define DBG_MMAP (1 << 3) /* String operations */ #define rmspace(str) while (*str == ' ') str++; #define goto2next(str) while (*str != ' ') str++; while (*str == ' ') str++; /* sequential number of usbvision device */ static int usbvision_nr; static struct usbvision_v4l2_format_st usbvision_v4l2_format[] = { { 1, 1, 8, V4L2_PIX_FMT_GREY , "GREY" }, { 1, 2, 16, V4L2_PIX_FMT_RGB565 , "RGB565" }, { 1, 3, 24, V4L2_PIX_FMT_RGB24 , "RGB24" }, { 1, 4, 32, V4L2_PIX_FMT_RGB32 , "RGB32" }, { 1, 2, 16, V4L2_PIX_FMT_RGB555 , "RGB555" }, { 1, 2, 16, V4L2_PIX_FMT_YUYV , "YUV422" }, { 1, 2, 12, V4L2_PIX_FMT_YVU420 , "YUV420P" }, /* 1.5 ! 
*/ { 1, 2, 16, V4L2_PIX_FMT_YUV422P , "YUV422P" } }; /* Function prototypes */ static void usbvision_release(struct usb_usbvision *usbvision); /* Default initialization of device driver parameters */ /* Set the default format for ISOC endpoint */ static int isoc_mode = ISOC_MODE_COMPRESS; /* Set the default Debug Mode of the device driver */ static int video_debug; /* Set the default device to power on at startup */ static int power_on_at_open = 1; /* Sequential Number of Video Device */ static int video_nr = -1; /* Sequential Number of Radio Device */ static int radio_nr = -1; /* Grab parameters for the device driver */ /* Showing parameters under SYSFS */ module_param(isoc_mode, int, 0444); module_param(video_debug, int, 0444); module_param(power_on_at_open, int, 0444); module_param(video_nr, int, 0444); module_param(radio_nr, int, 0444); MODULE_PARM_DESC(isoc_mode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)"); MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver. Default: 0 (Off)"); MODULE_PARM_DESC(power_on_at_open, " Set the default device to power on when device is opened. Default: 1 (On)"); MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). Default: -1 (autodetect)"); MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)"); /* Misc stuff */ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); MODULE_VERSION(USBVISION_VERSION_STRING); MODULE_ALIAS(DRIVER_ALIAS); /*****************************************************************************/ /* SYSFS Code - Copied from the stv680.c usb module. 
*/ /* Device information is located at /sys/class/video4linux/video0 */ /* Device parameters information is located at /sys/module/usbvision */ /* Device USB Information is located at */ /* /sys/bus/usb/drivers/USBVision Video Grabber */ /*****************************************************************************/ #define YES_NO(x) ((x) ? "Yes" : "No") static inline struct usb_usbvision *cd_to_usbvision(struct device *cd) { struct video_device *vdev = container_of(cd, struct video_device, dev); return video_get_drvdata(vdev); } static ssize_t show_version(struct device *cd, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", USBVISION_VERSION_STRING); } static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); static ssize_t show_model(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%s\n", usbvision_device_data[usbvision->dev_model].model_string); } static DEVICE_ATTR(model, S_IRUGO, show_model, NULL); static ssize_t show_hue(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_HUE; ctrl.value = 0; if (usbvision->user) call_all(usbvision, core, g_ctrl, &ctrl); return sprintf(buf, "%d\n", ctrl.value); } static DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL); static ssize_t show_contrast(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_CONTRAST; ctrl.value = 0; if (usbvision->user) call_all(usbvision, core, g_ctrl, &ctrl); return sprintf(buf, "%d\n", ctrl.value); } static DEVICE_ATTR(contrast, S_IRUGO, show_contrast, 
NULL); static ssize_t show_brightness(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_BRIGHTNESS; ctrl.value = 0; if (usbvision->user) call_all(usbvision, core, g_ctrl, &ctrl); return sprintf(buf, "%d\n", ctrl.value); } static DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL); static ssize_t show_saturation(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_SATURATION; ctrl.value = 0; if (usbvision->user) call_all(usbvision, core, g_ctrl, &ctrl); return sprintf(buf, "%d\n", ctrl.value); } static DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL); static ssize_t show_streaming(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%s\n", YES_NO(usbvision->streaming == stream_on ? 
1 : 0)); } static DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL); static ssize_t show_compression(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%s\n", YES_NO(usbvision->isoc_mode == ISOC_MODE_COMPRESS)); } static DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL); static ssize_t show_device_bridge(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%d\n", usbvision->bridge_type); } static DEVICE_ATTR(bridge, S_IRUGO, show_device_bridge, NULL); static void usbvision_create_sysfs(struct video_device *vdev) { int res; if (!vdev) return; do { res = device_create_file(&vdev->dev, &dev_attr_version); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_model); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_hue); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_contrast); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_brightness); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_saturation); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_streaming); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_compression); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_bridge); if (res >= 0) return; } while (0); dev_err(&vdev->dev, "%s error: %d\n", __func__, res); } static void usbvision_remove_sysfs(struct video_device *vdev) { if (vdev) { device_remove_file(&vdev->dev, &dev_attr_version); device_remove_file(&vdev->dev, &dev_attr_model); device_remove_file(&vdev->dev, &dev_attr_hue); device_remove_file(&vdev->dev, &dev_attr_contrast); device_remove_file(&vdev->dev, &dev_attr_brightness); 
device_remove_file(&vdev->dev, &dev_attr_saturation); device_remove_file(&vdev->dev, &dev_attr_streaming); device_remove_file(&vdev->dev, &dev_attr_compression); device_remove_file(&vdev->dev, &dev_attr_bridge); } } /* * usbvision_open() * * This is part of Video 4 Linux API. The driver can be opened by one * client only (checks internal counter 'usbvision->user'). The procedure * then allocates buffers needed for video processing. * */ static int usbvision_v4l2_open(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code = 0; PDEBUG(DBG_IO, "open"); if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return -ERESTARTSYS; usbvision_reset_power_off_timer(usbvision); if (usbvision->user) err_code = -EBUSY; else { /* Allocate memory for the scratch ring buffer */ err_code = usbvision_scratch_alloc(usbvision); if (isoc_mode == ISOC_MODE_COMPRESS) { /* Allocate intermediate decompression buffers only if needed */ err_code = usbvision_decompress_alloc(usbvision); } if (err_code) { /* Deallocate all buffers if trouble */ usbvision_scratch_free(usbvision); usbvision_decompress_free(usbvision); } } /* If so far no errors then we shall start the camera */ if (!err_code) { if (usbvision->power == 0) { usbvision_power_on(usbvision); usbvision_i2c_register(usbvision); } /* Send init sequence only once, it's large! 
*/ if (!usbvision->initialized) { int setup_ok = 0; setup_ok = usbvision_setup(usbvision, isoc_mode); if (setup_ok) usbvision->initialized = 1; else err_code = -EBUSY; } if (!err_code) { usbvision_begin_streaming(usbvision); err_code = usbvision_init_isoc(usbvision); /* device must be initialized before isoc transfer */ usbvision_muxsel(usbvision, 0); usbvision->user++; } else { if (power_on_at_open) { usbvision_i2c_unregister(usbvision); usbvision_power_off(usbvision); usbvision->initialized = 0; } } } /* prepare queues */ usbvision_empty_framequeues(usbvision); mutex_unlock(&usbvision->v4l2_lock); PDEBUG(DBG_IO, "success"); return err_code; } /* * usbvision_v4l2_close() * * This is part of Video 4 Linux API. The procedure * stops streaming and deallocates all buffers that were earlier * allocated in usbvision_v4l2_open(). * */ static int usbvision_v4l2_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); PDEBUG(DBG_IO, "close"); mutex_lock(&usbvision->v4l2_lock); usbvision_audio_off(usbvision); usbvision_restart_isoc(usbvision); usbvision_stop_isoc(usbvision); usbvision_decompress_free(usbvision); usbvision_frames_free(usbvision); usbvision_empty_framequeues(usbvision); usbvision_scratch_free(usbvision); usbvision->user--; if (power_on_at_open) { /* power off in a little while to avoid off/on every close/open short sequences */ usbvision_set_power_off_timer(usbvision); usbvision->initialized = 0; } if (usbvision->remove_pending) { printk(KERN_INFO "%s: Final disconnect\n", __func__); usbvision_release(usbvision); } mutex_unlock(&usbvision->v4l2_lock); PDEBUG(DBG_IO, "success"); return 0; } /* * usbvision_ioctl() * * This is part of Video 4 Linux API. The procedure handles ioctl() calls. 
* */ #ifdef CONFIG_VIDEO_ADV_DEBUG static int vidioc_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code; if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; /* NT100x has a 8-bit register space */ err_code = usbvision_read_reg(usbvision, reg->reg&0xff); if (err_code < 0) { dev_err(&usbvision->vdev->dev, "%s: VIDIOC_DBG_G_REGISTER failed: error %d\n", __func__, err_code); return err_code; } reg->val = err_code; reg->size = 1; return 0; } static int vidioc_s_register(struct file *file, void *priv, const struct v4l2_dbg_register *reg) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code; if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; /* NT100x has a 8-bit register space */ err_code = usbvision_write_reg(usbvision, reg->reg & 0xff, reg->val); if (err_code < 0) { dev_err(&usbvision->vdev->dev, "%s: VIDIOC_DBG_S_REGISTER failed: error %d\n", __func__, err_code); return err_code; } return 0; } #endif static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *vc) { struct usb_usbvision *usbvision = video_drvdata(file); strlcpy(vc->driver, "USBVision", sizeof(vc->driver)); strlcpy(vc->card, usbvision_device_data[usbvision->dev_model].model_string, sizeof(vc->card)); usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info)); vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | (usbvision->have_tuner ? 
V4L2_CAP_TUNER : 0); return 0; } static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *vi) { struct usb_usbvision *usbvision = video_drvdata(file); int chan; if (vi->index >= usbvision->video_inputs) return -EINVAL; if (usbvision->have_tuner) chan = vi->index; else chan = vi->index + 1; /* skip Television string*/ /* Determine the requested input characteristics specific for each usbvision card model */ switch (chan) { case 0: if (usbvision_device_data[usbvision->dev_model].video_channels == 4) { strcpy(vi->name, "White Video Input"); } else { strcpy(vi->name, "Television"); vi->type = V4L2_INPUT_TYPE_TUNER; vi->audioset = 1; vi->tuner = chan; vi->std = USBVISION_NORMS; } break; case 1: vi->type = V4L2_INPUT_TYPE_CAMERA; if (usbvision_device_data[usbvision->dev_model].video_channels == 4) strcpy(vi->name, "Green Video Input"); else strcpy(vi->name, "Composite Video Input"); vi->std = V4L2_STD_PAL; break; case 2: vi->type = V4L2_INPUT_TYPE_CAMERA; if (usbvision_device_data[usbvision->dev_model].video_channels == 4) strcpy(vi->name, "Yellow Video Input"); else strcpy(vi->name, "S-Video Input"); vi->std = V4L2_STD_PAL; break; case 3: vi->type = V4L2_INPUT_TYPE_CAMERA; strcpy(vi->name, "Red Video Input"); vi->std = V4L2_STD_PAL; break; } return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *input) { struct usb_usbvision *usbvision = video_drvdata(file); *input = usbvision->ctl_input; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int input) { struct usb_usbvision *usbvision = video_drvdata(file); if (input >= usbvision->video_inputs) return -EINVAL; usbvision_muxsel(usbvision, input); usbvision_set_input(usbvision); usbvision_set_output(usbvision, usbvision->curwidth, usbvision->curheight); return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id) { struct usb_usbvision *usbvision = video_drvdata(file); usbvision->tvnorm_id = id; call_all(usbvision, core, 
s_std, usbvision->tvnorm_id); /* propagate the change to the decoder */ usbvision_muxsel(usbvision, usbvision->ctl_input); return 0; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt) { struct usb_usbvision *usbvision = video_drvdata(file); if (!usbvision->have_tuner || vt->index) /* Only tuner 0 */ return -EINVAL; if (usbvision->radio) { strcpy(vt->name, "Radio"); vt->type = V4L2_TUNER_RADIO; } else { strcpy(vt->name, "Television"); } /* Let clients fill in the remainder of this struct */ call_all(usbvision, tuner, g_tuner, vt); return 0; } static int vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt) { struct usb_usbvision *usbvision = video_drvdata(file); /* Only no or one tuner for now */ if (!usbvision->have_tuner || vt->index) return -EINVAL; /* let clients handle this */ call_all(usbvision, tuner, s_tuner, vt); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *freq) { struct usb_usbvision *usbvision = video_drvdata(file); freq->tuner = 0; /* Only one tuner */ if (usbvision->radio) freq->type = V4L2_TUNER_RADIO; else freq->type = V4L2_TUNER_ANALOG_TV; freq->frequency = usbvision->freq; return 0; } static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *freq) { struct usb_usbvision *usbvision = video_drvdata(file); /* Only no or one tuner for now */ if (!usbvision->have_tuner || freq->tuner) return -EINVAL; usbvision->freq = freq->frequency; call_all(usbvision, tuner, s_frequency, freq); return 0; } static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { struct usb_usbvision *usbvision = video_drvdata(file); if (usbvision->radio) strcpy(a->name, "Radio"); else strcpy(a->name, "TV"); return 0; } static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *a) { if (a->index) return -EINVAL; return 0; } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl 
*ctrl) { struct usb_usbvision *usbvision = video_drvdata(file); call_all(usbvision, core, queryctrl, ctrl); if (!ctrl->type) return -EINVAL; return 0; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct usb_usbvision *usbvision = video_drvdata(file); call_all(usbvision, core, g_ctrl, ctrl); return 0; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct usb_usbvision *usbvision = video_drvdata(file); call_all(usbvision, core, s_ctrl, ctrl); return 0; } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *vr) { struct usb_usbvision *usbvision = video_drvdata(file); int ret; RESTRICT_TO_RANGE(vr->count, 1, USBVISION_NUMFRAMES); /* Check input validity: the user must do a VIDEO CAPTURE and MMAP method. */ if (vr->memory != V4L2_MEMORY_MMAP) return -EINVAL; if (usbvision->streaming == stream_on) { ret = usbvision_stream_interrupt(usbvision); if (ret) return ret; } usbvision_frames_free(usbvision); usbvision_empty_framequeues(usbvision); vr->count = usbvision_frames_alloc(usbvision, vr->count); usbvision->cur_frame = NULL; return 0; } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *vb) { struct usb_usbvision *usbvision = video_drvdata(file); struct usbvision_frame *frame; /* FIXME : must control that buffers are mapped (VIDIOC_REQBUFS has been called) */ if (vb->index >= usbvision->num_frames) return -EINVAL; /* Updating the corresponding frame state */ vb->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; frame = &usbvision->frame[vb->index]; if (frame->grabstate >= frame_state_ready) vb->flags |= V4L2_BUF_FLAG_QUEUED; if (frame->grabstate >= frame_state_done) vb->flags |= V4L2_BUF_FLAG_DONE; if (frame->grabstate == frame_state_unused) vb->flags |= V4L2_BUF_FLAG_MAPPED; vb->memory = V4L2_MEMORY_MMAP; vb->m.offset = vb->index * PAGE_ALIGN(usbvision->max_frame_size); vb->memory = V4L2_MEMORY_MMAP; vb->field = V4L2_FIELD_NONE; vb->length 
= usbvision->curwidth * usbvision->curheight * usbvision->palette.bytes_per_pixel; vb->timestamp = usbvision->frame[vb->index].timestamp; vb->sequence = usbvision->frame[vb->index].sequence; return 0; } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *vb) { struct usb_usbvision *usbvision = video_drvdata(file); struct usbvision_frame *frame; unsigned long lock_flags; /* FIXME : works only on VIDEO_CAPTURE MODE, MMAP. */ if (vb->index >= usbvision->num_frames) return -EINVAL; frame = &usbvision->frame[vb->index]; if (frame->grabstate != frame_state_unused) return -EAGAIN; /* Mark it as ready and enqueue frame */ frame->grabstate = frame_state_ready; frame->scanstate = scan_state_scanning; frame->scanlength = 0; /* Accumulated in usbvision_parse_data() */ vb->flags &= ~V4L2_BUF_FLAG_DONE; /* set v4l2_format index */ frame->v4l2_format = usbvision->palette; spin_lock_irqsave(&usbvision->queue_lock, lock_flags); list_add_tail(&usbvision->frame[vb->index].frame, &usbvision->inqueue); spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags); return 0; } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vb) { struct usb_usbvision *usbvision = video_drvdata(file); int ret; struct usbvision_frame *f; unsigned long lock_flags; if (list_empty(&(usbvision->outqueue))) { if (usbvision->streaming == stream_idle) return -EINVAL; ret = wait_event_interruptible (usbvision->wait_frame, !list_empty(&(usbvision->outqueue))); if (ret) return ret; } spin_lock_irqsave(&usbvision->queue_lock, lock_flags); f = list_entry(usbvision->outqueue.next, struct usbvision_frame, frame); list_del(usbvision->outqueue.next); spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags); f->grabstate = frame_state_unused; vb->memory = V4L2_MEMORY_MMAP; vb->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; vb->index = f->index; vb->sequence = f->sequence; vb->timestamp = f->timestamp; vb->field = 
V4L2_FIELD_NONE; vb->bytesused = f->scanlength; return 0; } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct usb_usbvision *usbvision = video_drvdata(file); usbvision->streaming = stream_on; call_all(usbvision, video, s_stream, 1); return 0; } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct usb_usbvision *usbvision = video_drvdata(file); if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (usbvision->streaming == stream_on) { usbvision_stream_interrupt(usbvision); /* Stop all video streamings */ call_all(usbvision, video, s_stream, 0); } usbvision_empty_framequeues(usbvision); return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *vfd) { if (vfd->index >= USBVISION_SUPPORTED_PALETTES - 1) return -EINVAL; strcpy(vfd->description, usbvision_v4l2_format[vfd->index].desc); vfd->pixelformat = usbvision_v4l2_format[vfd->index].format; return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf) { struct usb_usbvision *usbvision = video_drvdata(file); vf->fmt.pix.width = usbvision->curwidth; vf->fmt.pix.height = usbvision->curheight; vf->fmt.pix.pixelformat = usbvision->palette.format; vf->fmt.pix.bytesperline = usbvision->curwidth * usbvision->palette.bytes_per_pixel; vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline * usbvision->curheight; vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */ return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf) { struct usb_usbvision *usbvision = video_drvdata(file); int format_idx; /* Find requested format in available ones */ for (format_idx = 0; format_idx < USBVISION_SUPPORTED_PALETTES; format_idx++) { if (vf->fmt.pix.pixelformat == usbvision_v4l2_format[format_idx].format) { usbvision->palette = usbvision_v4l2_format[format_idx]; break; } } /* 
robustness */
	if (format_idx == USBVISION_SUPPORTED_PALETTES)
		return -EINVAL;

	/* Clamp the requested geometry to the hardware limits */
	RESTRICT_TO_RANGE(vf->fmt.pix.width, MIN_FRAME_WIDTH, MAX_FRAME_WIDTH);
	RESTRICT_TO_RANGE(vf->fmt.pix.height, MIN_FRAME_HEIGHT, MAX_FRAME_HEIGHT);

	vf->fmt.pix.bytesperline = vf->fmt.pix.width*
		usbvision->palette.bytes_per_pixel;
	vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*vf->fmt.pix.height;

	return 0;
}

/*
 * VIDIOC_S_FMT: program a new capture format. Any running stream is
 * interrupted and all frame buffers are dropped first.
 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
			       struct v4l2_format *vf)
{
	struct usb_usbvision *usbvision = video_drvdata(file);
	int ret;

	ret = vidioc_try_fmt_vid_cap(file, priv, vf);
	if (ret)
		return ret;

	/* stop io in case it is already in progress */
	if (usbvision->streaming == stream_on) {
		ret = usbvision_stream_interrupt(usbvision);
		if (ret)
			return ret;
	}
	usbvision_frames_free(usbvision);
	usbvision_empty_framequeues(usbvision);

	usbvision->cur_frame = NULL;

	/* by now we are committed to the new data... */
	usbvision_set_output(usbvision, vf->fmt.pix.width, vf->fmt.pix.height);

	return 0;
}

/*
 * read() path: emulates REQBUFS/QBUF/DQBUF internally so a plain read()
 * on the device node also delivers frames.
 */
static ssize_t usbvision_read(struct file *file, char __user *buf,
		      size_t count, loff_t *ppos)
{
	struct usb_usbvision *usbvision = video_drvdata(file);
	int noblock = file->f_flags & O_NONBLOCK;
	unsigned long lock_flags;
	int ret, i;
	struct usbvision_frame *frame;

	PDEBUG(DBG_IO, "%s: %ld bytes, noblock=%d", __func__,
	       (unsigned long)count, noblock);

	if (!USBVISION_IS_OPERATIONAL(usbvision) || (buf == NULL))
		return -EFAULT;

	/* This entry point is compatible with the mmap routines
	   so that a user can do either VIDIOC_QBUF/VIDIOC_DQBUF
	   to get frames or call read on the device. */
	if (!usbvision->num_frames) {
		/* First, allocate some frames to work with
		   if this has not been done with VIDIOC_REQBUF */
		usbvision_frames_free(usbvision);
		usbvision_empty_framequeues(usbvision);
		/* NOTE(review): the allocation result is not checked here —
		   verify usbvision_frames_alloc() cannot fail silently. */
		usbvision_frames_alloc(usbvision, USBVISION_NUMFRAMES);
	}

	if (usbvision->streaming != stream_on) {
		/* no stream is running, make it running !
*/
		usbvision->streaming = stream_on;
		call_all(usbvision, video, s_stream, 1);
	}

	/* Then, enqueue as many frames as possible
	   (like a user of VIDIOC_QBUF would do) */
	for (i = 0; i < usbvision->num_frames; i++) {
		frame = &usbvision->frame[i];
		if (frame->grabstate == frame_state_unused) {
			/* Mark it as ready and enqueue frame */
			frame->grabstate = frame_state_ready;
			frame->scanstate = scan_state_scanning;
			/* Accumulated in usbvision_parse_data() */
			frame->scanlength = 0;

			/* set v4l2_format index */
			frame->v4l2_format = usbvision->palette;

			/* inqueue is shared with the URB completion path */
			spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
			list_add_tail(&frame->frame, &usbvision->inqueue);
			spin_unlock_irqrestore(&usbvision->queue_lock,
					       lock_flags);
		}
	}

	/* Then try to steal a frame (like a VIDIOC_DQBUF would do) */
	if (list_empty(&(usbvision->outqueue))) {
		if (noblock)
			return -EAGAIN;

		ret = wait_event_interruptible
			(usbvision->wait_frame,
			 !list_empty(&(usbvision->outqueue)));
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
	frame = list_entry(usbvision->outqueue.next,
			   struct usbvision_frame, frame);
	list_del(usbvision->outqueue.next);
	spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);

	/* An error returns an empty frame */
	if (frame->grabstate == frame_state_error) {
		frame->bytes_read = 0;
		return 0;
	}

	PDEBUG(DBG_IO, "%s: frmx=%d, bytes_read=%ld, scanlength=%ld",
	       __func__,
	       frame->index, frame->bytes_read, frame->scanlength);

	/* copy bytes to user space; we allow for partials reads */
	if ((count + frame->bytes_read) > (unsigned long)frame->scanlength)
		count = frame->scanlength - frame->bytes_read;

	if (copy_to_user(buf, frame->data + frame->bytes_read, count))
		return -EFAULT;

	frame->bytes_read += count;
	PDEBUG(DBG_IO, "%s: {copy} count used=%ld, new bytes_read=%ld",
	       __func__,
	       (unsigned long)count, frame->bytes_read);

	/* For now, forget the frame if it has not been read in one shot.
*/
	/* if (frame->bytes_read >= frame->scanlength) {*/ /* All data has been read */
	frame->bytes_read = 0;

	/* Mark it as available to be used again. */
	frame->grabstate = frame_state_unused;
	/* } */

	return count;
}

/* Locked wrapper around usbvision_read(). */
static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf,
		      size_t count, loff_t *ppos)
{
	struct usb_usbvision *usbvision = video_drvdata(file);
	int res;

	if (mutex_lock_interruptible(&usbvision->v4l2_lock))
		return -ERESTARTSYS;
	res = usbvision_read(file, buf, count, ppos);
	mutex_unlock(&usbvision->v4l2_lock);
	return res;
}

/*
 * mmap(): map one frame buffer (selected via vm_pgoff) into user space.
 * The mapping must be writable and exactly one page-aligned frame long.
 */
static int usbvision_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start,
		start = vma->vm_start;
	void *pos;
	u32 i;
	struct usb_usbvision *usbvision = video_drvdata(file);

	PDEBUG(DBG_MMAP, "mmap");

	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return -EFAULT;

	if (!(vma->vm_flags & VM_WRITE) ||
	    size != PAGE_ALIGN(usbvision->max_frame_size)) {
		return -EINVAL;
	}

	/* Locate the frame whose buffer starts at the requested offset */
	for (i = 0; i < usbvision->num_frames; i++) {
		if (((PAGE_ALIGN(usbvision->max_frame_size)*i) >> PAGE_SHIFT) ==
		    vma->vm_pgoff)
			break;
	}
	if (i == usbvision->num_frames) {
		PDEBUG(DBG_MMAP,
		       "mmap: user supplied mapping address is out of range");
		return -EINVAL;
	}

	/* VM_IO is eventually going to replace PageReserved altogether */
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	/* Insert the vmalloc'ed frame buffer page by page */
	pos = usbvision->frame[i].data;
	while (size > 0) {
		if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
			PDEBUG(DBG_MMAP, "mmap: vm_insert_page failed");
			return -EAGAIN;
		}
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	return 0;
}

/* Locked wrapper around usbvision_mmap(). */
static int usbvision_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct usb_usbvision *usbvision = video_drvdata(file);
	int res;

	if (mutex_lock_interruptible(&usbvision->v4l2_lock))
		return -ERESTARTSYS;
	res = usbvision_mmap(file, vma);
	mutex_unlock(&usbvision->v4l2_lock);
	return res;
}

/*
 * Here comes the stuff for radio on usbvision based devices
 *
 */
static int usbvision_radio_open(struct file
*file) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code = 0; PDEBUG(DBG_IO, "%s:", __func__); if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return -ERESTARTSYS; if (usbvision->user) { dev_err(&usbvision->rdev->dev, "%s: Someone tried to open an already opened USBVision Radio!\n", __func__); err_code = -EBUSY; } else { if (power_on_at_open) { usbvision_reset_power_off_timer(usbvision); if (usbvision->power == 0) { usbvision_power_on(usbvision); usbvision_i2c_register(usbvision); } } /* Alternate interface 1 is is the biggest frame size */ err_code = usbvision_set_alternate(usbvision); if (err_code < 0) { usbvision->last_error = err_code; err_code = -EBUSY; goto out; } /* If so far no errors then we shall start the radio */ usbvision->radio = 1; call_all(usbvision, tuner, s_radio); usbvision_set_audio(usbvision, USBVISION_AUDIO_RADIO); usbvision->user++; } if (err_code) { if (power_on_at_open) { usbvision_i2c_unregister(usbvision); usbvision_power_off(usbvision); usbvision->initialized = 0; } } out: mutex_unlock(&usbvision->v4l2_lock); return err_code; } static int usbvision_radio_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code = 0; PDEBUG(DBG_IO, ""); mutex_lock(&usbvision->v4l2_lock); /* Set packet size to 0 */ usbvision->iface_alt = 0; err_code = usb_set_interface(usbvision->dev, usbvision->iface, usbvision->iface_alt); usbvision_audio_off(usbvision); usbvision->radio = 0; usbvision->user--; if (power_on_at_open) { usbvision_set_power_off_timer(usbvision); usbvision->initialized = 0; } if (usbvision->remove_pending) { printk(KERN_INFO "%s: Final disconnect\n", __func__); usbvision_release(usbvision); } mutex_unlock(&usbvision->v4l2_lock); PDEBUG(DBG_IO, "success"); return err_code; } /* Video registration stuff */ /* Video template */ static const struct v4l2_file_operations usbvision_fops = { .owner = THIS_MODULE, .open = usbvision_v4l2_open, .release = usbvision_v4l2_close, .read = 
usbvision_v4l2_read,
	.mmap		= usbvision_v4l2_mmap,
	.unlocked_ioctl	= video_ioctl2,
/*	.poll		= video_poll, */
};

/* ioctl dispatch table for the video capture node */
static const struct v4l2_ioctl_ops usbvision_ioctl_ops = {
	.vidioc_querycap      = vidioc_querycap,
	.vidioc_enum_fmt_vid_cap  = vidioc_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap     = vidioc_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap   = vidioc_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap     = vidioc_s_fmt_vid_cap,
	.vidioc_reqbufs       = vidioc_reqbufs,
	.vidioc_querybuf      = vidioc_querybuf,
	.vidioc_qbuf          = vidioc_qbuf,
	.vidioc_dqbuf         = vidioc_dqbuf,
	.vidioc_s_std         = vidioc_s_std,
	.vidioc_enum_input    = vidioc_enum_input,
	.vidioc_g_input       = vidioc_g_input,
	.vidioc_s_input       = vidioc_s_input,
	.vidioc_queryctrl     = vidioc_queryctrl,
	.vidioc_g_audio       = vidioc_g_audio,
	.vidioc_s_audio       = vidioc_s_audio,
	.vidioc_g_ctrl        = vidioc_g_ctrl,
	.vidioc_s_ctrl        = vidioc_s_ctrl,
	.vidioc_streamon      = vidioc_streamon,
	.vidioc_streamoff     = vidioc_streamoff,
	.vidioc_g_tuner       = vidioc_g_tuner,
	.vidioc_s_tuner       = vidioc_s_tuner,
	.vidioc_g_frequency   = vidioc_g_frequency,
	.vidioc_s_frequency   = vidioc_s_frequency,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.vidioc_g_register    = vidioc_g_register,
	.vidioc_s_register    = vidioc_s_register,
#endif
};

static struct video_device usbvision_video_template = {
	.fops		= &usbvision_fops,
	.ioctl_ops	= &usbvision_ioctl_ops,
	.name		= "usbvision-video",
	.release	= video_device_release,
	.tvnorms	= USBVISION_NORMS,
	.current_norm	= V4L2_STD_PAL
};

/* Radio template */
static const struct v4l2_file_operations usbvision_radio_fops = {
	.owner		= THIS_MODULE,
	.open		= usbvision_radio_open,
	.release	= usbvision_radio_close,
	.unlocked_ioctl	= video_ioctl2,
};

/* ioctl dispatch table for the radio node (no video format ops) */
static const struct v4l2_ioctl_ops usbvision_radio_ioctl_ops = {
	.vidioc_querycap      = vidioc_querycap,
	.vidioc_enum_input    = vidioc_enum_input,
	.vidioc_g_input       = vidioc_g_input,
	.vidioc_s_input       = vidioc_s_input,
	.vidioc_queryctrl     = vidioc_queryctrl,
	.vidioc_g_audio       = vidioc_g_audio,
	.vidioc_s_audio       = vidioc_s_audio,
	.vidioc_g_ctrl        = vidioc_g_ctrl,
	.vidioc_s_ctrl        =
vidioc_s_ctrl, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, }; static struct video_device usbvision_radio_template = { .fops = &usbvision_radio_fops, .name = "usbvision-radio", .release = video_device_release, .ioctl_ops = &usbvision_radio_ioctl_ops, .tvnorms = USBVISION_NORMS, .current_norm = V4L2_STD_PAL }; static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision, struct video_device *vdev_template, char *name) { struct usb_device *usb_dev = usbvision->dev; struct video_device *vdev; if (usb_dev == NULL) { dev_err(&usbvision->dev->dev, "%s: usbvision->dev is not set\n", __func__); return NULL; } vdev = video_device_alloc(); if (NULL == vdev) return NULL; *vdev = *vdev_template; vdev->lock = &usbvision->v4l2_lock; vdev->v4l2_dev = &usbvision->v4l2_dev; snprintf(vdev->name, sizeof(vdev->name), "%s", name); video_set_drvdata(vdev, usbvision); return vdev; } /* unregister video4linux devices */ static void usbvision_unregister_video(struct usb_usbvision *usbvision) { /* Radio Device: */ if (usbvision->rdev) { PDEBUG(DBG_PROBE, "unregister %s [v4l2]", video_device_node_name(usbvision->rdev)); if (video_is_registered(usbvision->rdev)) video_unregister_device(usbvision->rdev); else video_device_release(usbvision->rdev); usbvision->rdev = NULL; } /* Video Device: */ if (usbvision->vdev) { PDEBUG(DBG_PROBE, "unregister %s [v4l2]", video_device_node_name(usbvision->vdev)); if (video_is_registered(usbvision->vdev)) video_unregister_device(usbvision->vdev); else video_device_release(usbvision->vdev); usbvision->vdev = NULL; } } /* register video4linux devices */ static int usbvision_register_video(struct usb_usbvision *usbvision) { /* Video Device: */ usbvision->vdev = usbvision_vdev_init(usbvision, &usbvision_video_template, "USBVision Video"); if (usbvision->vdev == NULL) goto err_exit; if (video_register_device(usbvision->vdev, 
VFL_TYPE_GRABBER, video_nr) < 0)
		goto err_exit;
	printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n",
	       usbvision->nr, video_device_node_name(usbvision->vdev));

	/* Radio Device: */
	if (usbvision_device_data[usbvision->dev_model].radio) {
		/* usbvision has radio */
		usbvision->rdev = usbvision_vdev_init(usbvision,
						      &usbvision_radio_template,
						      "USBVision Radio");
		if (usbvision->rdev == NULL)
			goto err_exit;
		if (video_register_device(usbvision->rdev, VFL_TYPE_RADIO,
					  radio_nr) < 0)
			goto err_exit;
		printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n",
		       usbvision->nr, video_device_node_name(usbvision->rdev));
	}
	/* all done */
	return 0;

 err_exit:
	dev_err(&usbvision->dev->dev,
		"USBVision[%d]: video_register_device() failed\n",
			usbvision->nr);
	usbvision_unregister_video(usbvision);
	return -1;
}

/*
 * usbvision_alloc()
 *
 * This code allocates the struct usb_usbvision.
 * It is filled with default values.
 *
 * Returns NULL on error, a pointer to usb_usbvision else.
 *
 */
static struct usb_usbvision *usbvision_alloc(struct usb_device *dev,
					     struct usb_interface *intf)
{
	struct usb_usbvision *usbvision;

	usbvision = kzalloc(sizeof(struct usb_usbvision), GFP_KERNEL);
	if (usbvision == NULL)
		return NULL;

	usbvision->dev = dev;
	if (v4l2_device_register(&intf->dev, &usbvision->v4l2_dev))
		goto err_free;

	mutex_init(&usbvision->v4l2_lock);

	/* prepare control urb for control messages during interrupts */
	usbvision->ctrl_urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL);
	if (usbvision->ctrl_urb == NULL)
		goto err_unreg;
	init_waitqueue_head(&usbvision->ctrl_urb_wq);

	usbvision_init_power_off_timer(usbvision);

	return usbvision;

err_unreg:
	v4l2_device_unregister(&usbvision->v4l2_dev);
err_free:
	kfree(usbvision);
	return NULL;
}

/*
 * usbvision_release()
 *
 * This code does final release of struct usb_usbvision. This happens
 * after the device is disconnected -and- all clients closed their files.
* */ static void usbvision_release(struct usb_usbvision *usbvision) { PDEBUG(DBG_PROBE, ""); usbvision_reset_power_off_timer(usbvision); usbvision->initialized = 0; usbvision_remove_sysfs(usbvision->vdev); usbvision_unregister_video(usbvision); usb_free_urb(usbvision->ctrl_urb); v4l2_device_unregister(&usbvision->v4l2_dev); kfree(usbvision); PDEBUG(DBG_PROBE, "success"); } /*********************** usb interface **********************************/ static void usbvision_configure_video(struct usb_usbvision *usbvision) { int model; if (usbvision == NULL) return; model = usbvision->dev_model; usbvision->palette = usbvision_v4l2_format[2]; /* V4L2_PIX_FMT_RGB24; */ if (usbvision_device_data[usbvision->dev_model].vin_reg2_override) { usbvision->vin_reg2_preset = usbvision_device_data[usbvision->dev_model].vin_reg2; } else { usbvision->vin_reg2_preset = 0; } usbvision->tvnorm_id = usbvision_device_data[model].video_norm; usbvision->video_inputs = usbvision_device_data[model].video_channels; usbvision->ctl_input = 0; /* This should be here to make i2c clients to be able to register */ /* first switch off audio */ if (usbvision_device_data[model].audio_channels > 0) usbvision_audio_off(usbvision); if (!power_on_at_open) { /* and then power up the noisy tuner */ usbvision_power_on(usbvision); usbvision_i2c_register(usbvision); } } /* * usbvision_probe() * * This procedure queries device descriptor and accepts the interface * if it looks like USBVISION video device * */ static int usbvision_probe(struct usb_interface *intf, const struct usb_device_id *devid) { struct usb_device *dev = usb_get_dev(interface_to_usbdev(intf)); struct usb_interface *uif; __u8 ifnum = intf->altsetting->desc.bInterfaceNumber; const struct usb_host_interface *interface; struct usb_usbvision *usbvision = NULL; const struct usb_endpoint_descriptor *endpoint; int model, i; PDEBUG(DBG_PROBE, "VID=%#04x, PID=%#04x, ifnum=%u", dev->descriptor.idVendor, dev->descriptor.idProduct, ifnum); model = 
devid->driver_info; if (model < 0 || model >= usbvision_device_data_size) { PDEBUG(DBG_PROBE, "model out of bounds %d", model); return -ENODEV; } printk(KERN_INFO "%s: %s found\n", __func__, usbvision_device_data[model].model_string); if (usbvision_device_data[model].interface >= 0) interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; else interface = &dev->actconfig->interface[ifnum]->altsetting[0]; endpoint = &interface->endpoint[1].desc; if (!usb_endpoint_xfer_isoc(endpoint)) { dev_err(&intf->dev, "%s: interface %d. has non-ISO endpoint!\n", __func__, ifnum); dev_err(&intf->dev, "%s: Endpoint attributes %d", __func__, endpoint->bmAttributes); return -ENODEV; } if (usb_endpoint_dir_out(endpoint)) { dev_err(&intf->dev, "%s: interface %d. has ISO OUT endpoint!\n", __func__, ifnum); return -ENODEV; } usbvision = usbvision_alloc(dev, intf); if (usbvision == NULL) { dev_err(&intf->dev, "%s: couldn't allocate USBVision struct\n", __func__); return -ENOMEM; } if (dev->descriptor.bNumConfigurations > 1) usbvision->bridge_type = BRIDGE_NT1004; else if (model == DAZZLE_DVC_90_REV_1_SECAM) usbvision->bridge_type = BRIDGE_NT1005; else usbvision->bridge_type = BRIDGE_NT1003; PDEBUG(DBG_PROBE, "bridge_type %d", usbvision->bridge_type); /* compute alternate max packet sizes */ uif = dev->actconfig->interface[0]; usbvision->num_alt = uif->num_altsetting; PDEBUG(DBG_PROBE, "Alternate settings: %i", usbvision->num_alt); usbvision->alt_max_pkt_size = kmalloc(32 * usbvision->num_alt, GFP_KERNEL); if (usbvision->alt_max_pkt_size == NULL) { dev_err(&intf->dev, "usbvision: out of memory!\n"); return -ENOMEM; } for (i = 0; i < usbvision->num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc. 
wMaxPacketSize); usbvision->alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); PDEBUG(DBG_PROBE, "Alternate setting %i, max size= %i", i, usbvision->alt_max_pkt_size[i]); } usbvision->nr = usbvision_nr++; usbvision->have_tuner = usbvision_device_data[model].tuner; if (usbvision->have_tuner) usbvision->tuner_type = usbvision_device_data[model].tuner_type; usbvision->dev_model = model; usbvision->remove_pending = 0; usbvision->iface = ifnum; usbvision->iface_alt = 0; usbvision->video_endp = endpoint->bEndpointAddress; usbvision->isoc_packet_size = 0; usbvision->usb_bandwidth = 0; usbvision->user = 0; usbvision->streaming = stream_off; usbvision_configure_video(usbvision); usbvision_register_video(usbvision); usbvision_create_sysfs(usbvision->vdev); PDEBUG(DBG_PROBE, "success"); return 0; } /* * usbvision_disconnect() * * This procedure stops all driver activity, deallocates interface-private * structure (pointed by 'ptr') and after that driver should be removable * with no ill consequences. 
* */
static void usbvision_disconnect(struct usb_interface *intf)
{
	struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));

	PDEBUG(DBG_PROBE, "");

	if (usbvision == NULL) {
		pr_err("%s: usb_get_intfdata() failed\n", __func__);
		return;
	}

	mutex_lock(&usbvision->v4l2_lock);

	/* At this time we ask to cancel outstanding URBs */
	usbvision_stop_isoc(usbvision);

	v4l2_device_disconnect(&usbvision->v4l2_dev);

	if (usbvision->power) {
		usbvision_i2c_unregister(usbvision);
		usbvision_power_off(usbvision);
	}
	usbvision->remove_pending = 1;	/* Now all ISO data will be ignored */

	usb_put_dev(usbvision->dev);
	usbvision->dev = NULL;	/* USB device is no more */

	mutex_unlock(&usbvision->v4l2_lock);

	/* If a file handle is still open, defer the final teardown to
	   the corresponding close() (it checks remove_pending). */
	if (usbvision->user) {
		printk(KERN_INFO "%s: In use, disconnect pending\n",
		       __func__);
		wake_up_interruptible(&usbvision->wait_frame);
		wake_up_interruptible(&usbvision->wait_stream);
	} else {
		usbvision_release(usbvision);
	}

	PDEBUG(DBG_PROBE, "success");
}

static struct usb_driver usbvision_driver = {
	.name		= "usbvision",
	.id_table	= usbvision_table,
	.probe		= usbvision_probe,
	.disconnect	= usbvision_disconnect,
};

/*
 * usbvision_init()
 *
 * This code is run to initialize the driver.
* */
static int __init usbvision_init(void)
{
	int err_code;

	PDEBUG(DBG_PROBE, "");

	PDEBUG(DBG_IO, "IO debugging is enabled [video]");
	PDEBUG(DBG_PROBE, "PROBE debugging is enabled [video]");
	PDEBUG(DBG_MMAP, "MMAP debugging is enabled [video]");

	/* disable planar mode support unless compression enabled */
	if (isoc_mode != ISOC_MODE_COMPRESS) {
		/* FIXME : not the right way to set supported flag */
		usbvision_v4l2_format[6].supported = 0; /* V4L2_PIX_FMT_YVU420 */
		usbvision_v4l2_format[7].supported = 0; /* V4L2_PIX_FMT_YUV422P */
	}

	err_code = usb_register(&usbvision_driver);

	if (err_code == 0) {
		printk(KERN_INFO DRIVER_DESC " : " USBVISION_VERSION_STRING "\n");
		PDEBUG(DBG_PROBE, "success");
	}
	return err_code;
}

/* Module unload: detach the USB driver; per-device cleanup happens
   in usbvision_disconnect()/usbvision_release(). */
static void __exit usbvision_exit(void)
{
	PDEBUG(DBG_PROBE, "");

	usb_deregister(&usbvision_driver);
	PDEBUG(DBG_PROBE, "success");
}

module_init(usbvision_init);
module_exit(usbvision_exit);

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-basic-offset: 8
 * End:
 */
gpl-2.0
tyler6389/android_kernel_samsung_frescolte
drivers/crypto/msm/qcrypto_fips.c
1880
14327
/* Qcrypto: FIPS 140-2 Selftests
 *
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <crypto/hash.h>	/* Fix: was included twice */
#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <mach/qcrypto.h>
#include "qcryptoi.h"
#include "qcrypto_fips.h"

/*
 * Callback function
 *
 * Completion callback for async crypto requests; records the final status
 * and wakes the selftest waiting on the completion. -EINPROGRESS only
 * signals that the request was moved to the backlog, so it is ignored.
 */
static void _fips_cb(struct crypto_async_request *crypto_async_req, int err)
{
	struct _fips_completion *fips_completion = crypto_async_req->data;

	if (err == -EINPROGRESS)
		return;

	fips_completion->err = err;
	complete(&fips_completion->completion);
}

/*
 * Function to prefix if needed
 *
 * Prepends @prefix to @cra_name in place. @size is the length of the
 * current cra_name string (callers pass strlen(cra_name)).
 * Returns 0 on success, -EINVAL if the combined name would not fit.
 */
static int _fips_get_alg_cra_name(char cra_name[],
				char *prefix, unsigned int size)
{
	char new_cra_name[CRYPTO_MAX_ALG_NAME];

	/*
	 * Fix: check the length *before* doing any copying, and account for
	 * the terminating NUL. The old check allowed
	 * size + strlen(prefix) == CRYPTO_MAX_ALG_NAME, in which case
	 * strlcat() silently truncated the algorithm name by one character
	 * and the selftest would look up the wrong algorithm.
	 */
	if (size + strlen(prefix) + 1 > CRYPTO_MAX_ALG_NAME)
		return -EINVAL;

	strlcpy(new_cra_name, prefix, CRYPTO_MAX_ALG_NAME);
	strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
	strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
	return 0;
}

/*
 * Sha/HMAC self tests
 */
int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d)
{
	int rc = 0, err, tv_index = 0, num_tv;
	char *k_out_buf = NULL;
	struct scatterlist fips_sg;
	struct crypto_ahash *tfm;
	struct ahash_request *ahash_req;
	struct _fips_completion fips_completion;
struct _fips_test_vector_sha_hmac tv_sha_hmac; num_tv = (sizeof(fips_test_vector_sha_hmac)) / (sizeof(struct _fips_test_vector_sha_hmac)); /* One-by-one testing */ for (tv_index = 0; tv_index < num_tv; tv_index++) { memcpy(&tv_sha_hmac, &fips_test_vector_sha_hmac[tv_index], (sizeof(struct _fips_test_vector_sha_hmac))); k_out_buf = kzalloc(tv_sha_hmac.diglen, GFP_KERNEL); if (k_out_buf == NULL) { pr_err("qcrypto: Failed to allocate memory for k_out_buf %ld\n", PTR_ERR(k_out_buf)); return -ENOMEM; } memset(k_out_buf, 0, tv_sha_hmac.diglen); init_completion(&fips_completion.completion); /* use_sw flags are set in dtsi file which makes default Linux API calls to go to s/w crypto instead of h/w crypto. This code makes sure that all selftests calls always go to h/w, independent of DTSI flags. */ if (tv_sha_hmac.klen == 0) { if (selftest_d->prefix_ahash_algo) if (_fips_get_alg_cra_name(tv_sha_hmac .hash_alg, selftest_d->algo_prefix, strlen(tv_sha_hmac.hash_alg))) { rc = -1; pr_err("Algo Name is too long for tv %d\n", tv_index); goto clr_buf; } } else { if (selftest_d->prefix_hmac_algo) if (_fips_get_alg_cra_name(tv_sha_hmac .hash_alg, selftest_d->algo_prefix, strlen(tv_sha_hmac.hash_alg))) { rc = -1; pr_err("Algo Name is too long for tv %d\n", tv_index); goto clr_buf; } } tfm = crypto_alloc_ahash(tv_sha_hmac.hash_alg, 0, 0); if (IS_ERR(tfm)) { pr_err("qcrypto: %s algorithm not found\n", tv_sha_hmac.hash_alg); rc = PTR_ERR(tfm); goto clr_buf; } ahash_req = ahash_request_alloc(tfm, GFP_KERNEL); if (!ahash_req) { pr_err("qcrypto: ahash_request_alloc failed\n"); rc = -ENOMEM; goto clr_tfm; } rc = qcrypto_ahash_set_device(ahash_req, selftest_d->ce_device); if (rc != 0) { pr_err("%s qcrypto_cipher_set_device failed with err %d\n", __func__, rc); goto clr_ahash_req; } ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG, _fips_cb, &fips_completion); sg_init_one(&fips_sg, &tv_sha_hmac.input[0], tv_sha_hmac.ilen); crypto_ahash_clear_flags(tfm, ~0); if (tv_sha_hmac.klen 
!= 0) { rc = crypto_ahash_setkey(tfm, tv_sha_hmac.key, tv_sha_hmac.klen); if (rc) { pr_err("qcrypto: crypto_ahash_setkey failed\n"); goto clr_ahash_req; } } ahash_request_set_crypt(ahash_req, &fips_sg, k_out_buf, tv_sha_hmac.ilen); rc = crypto_ahash_digest(ahash_req); if (rc == -EINPROGRESS || rc == -EBUSY) { rc = wait_for_completion_interruptible( &fips_completion.completion); err = fips_completion.err; if (!rc && !err) { INIT_COMPLETION(fips_completion.completion); } else { pr_err("qcrypto:SHA: wait_for_completion failed\n"); goto clr_ahash_req; } } if (memcmp(k_out_buf, tv_sha_hmac.digest, tv_sha_hmac.diglen)) rc = -1; clr_ahash_req: ahash_request_free(ahash_req); clr_tfm: crypto_free_ahash(tfm); clr_buf: kzfree(k_out_buf); /* For any failure, return error */ if (rc) return rc; } return rc; } /* * Cipher algorithm self tests */ int _fips_qcrypto_cipher_selftest(struct fips_selftest_data *selftest_d) { int rc = 0, err, tv_index, num_tv; struct crypto_ablkcipher *tfm; struct ablkcipher_request *ablkcipher_req; struct _fips_completion fips_completion; char *k_align_src = NULL; struct scatterlist fips_sg; struct _fips_test_vector_cipher tv_cipher; num_tv = (sizeof(fips_test_vector_cipher)) / (sizeof(struct _fips_test_vector_cipher)); /* One-by-one testing */ for (tv_index = 0; tv_index < num_tv; tv_index++) { memcpy(&tv_cipher, &fips_test_vector_cipher[tv_index], (sizeof(struct _fips_test_vector_cipher))); /* Single buffer allocation for in place operation */ k_align_src = kzalloc(tv_cipher.pln_txt_len, GFP_KERNEL); if (k_align_src == NULL) { pr_err("qcrypto:, Failed to allocate memory for k_align_src %ld\n", PTR_ERR(k_align_src)); return -ENOMEM; } memcpy(&k_align_src[0], tv_cipher.pln_txt, tv_cipher.pln_txt_len); /* use_sw flags are set in dtsi file which makes default Linux API calls to go to s/w crypto instead of h/w crypto. This code makes sure that all selftests calls always go to h/w, independent of DTSI flags. 
*/ if (!strcmp(tv_cipher.mod_alg, "xts(aes)")) { if (selftest_d->prefix_aes_xts_algo) if (_fips_get_alg_cra_name( tv_cipher.mod_alg, selftest_d->algo_prefix, strlen(tv_cipher.mod_alg))) { rc = -1; pr_err("Algo Name is too long for tv %d\n", tv_index); goto clr_buf; } } else { if (selftest_d->prefix_aes_cbc_ecb_ctr_algo) if (_fips_get_alg_cra_name( tv_cipher.mod_alg, selftest_d->algo_prefix, strlen(tv_cipher.mod_alg))) { rc = -1; pr_err("Algo Name is too long for tv %d\n", tv_index); goto clr_buf; } } tfm = crypto_alloc_ablkcipher(tv_cipher.mod_alg, 0, 0); if (IS_ERR(tfm)) { pr_err("qcrypto: %s algorithm not found\n", tv_cipher.mod_alg); rc = -ENOMEM; goto clr_buf; } ablkcipher_req = ablkcipher_request_alloc(tfm, GFP_KERNEL); if (!ablkcipher_req) { pr_err("qcrypto: ablkcipher_request_alloc failed\n"); rc = -ENOMEM; goto clr_tfm; } rc = qcrypto_cipher_set_device(ablkcipher_req, selftest_d->ce_device); if (rc != 0) { pr_err("%s qcrypto_cipher_set_device failed with err %d\n", __func__, rc); goto clr_ablkcipher_req; } ablkcipher_request_set_callback(ablkcipher_req, CRYPTO_TFM_REQ_MAY_BACKLOG, _fips_cb, &fips_completion); crypto_ablkcipher_clear_flags(tfm, ~0); rc = crypto_ablkcipher_setkey(tfm, tv_cipher.key, tv_cipher.klen); if (rc) { pr_err("qcrypto: crypto_ablkcipher_setkey failed\n"); goto clr_ablkcipher_req; } sg_set_buf(&fips_sg, k_align_src, tv_cipher.enc_txt_len); sg_mark_end(&fips_sg); ablkcipher_request_set_crypt(ablkcipher_req, &fips_sg, &fips_sg, tv_cipher.pln_txt_len, tv_cipher.iv); /**** Encryption Test ****/ init_completion(&fips_completion.completion); rc = crypto_ablkcipher_encrypt(ablkcipher_req); if (rc == -EINPROGRESS || rc == -EBUSY) { rc = wait_for_completion_interruptible( &fips_completion.completion); err = fips_completion.err; if (!rc && !err) { INIT_COMPLETION(fips_completion.completion); } else { pr_err("qcrypto:cipher:ENC, wait_for_completion failed\n"); goto clr_ablkcipher_req; } } if (memcmp(k_align_src, tv_cipher.enc_txt, 
tv_cipher.enc_txt_len)) { rc = -1; goto clr_ablkcipher_req; } /**** Decryption test ****/ init_completion(&fips_completion.completion); rc = crypto_ablkcipher_decrypt(ablkcipher_req); if (rc == -EINPROGRESS || rc == -EBUSY) { rc = wait_for_completion_interruptible( &fips_completion.completion); err = fips_completion.err; if (!rc && !err) { INIT_COMPLETION(fips_completion.completion); } else { pr_err("qcrypto:cipher:DEC, wait_for_completion failed\n"); goto clr_ablkcipher_req; } } if (memcmp(k_align_src, tv_cipher.pln_txt, tv_cipher.pln_txt_len)) rc = -1; clr_ablkcipher_req: ablkcipher_request_free(ablkcipher_req); clr_tfm: crypto_free_ablkcipher(tfm); clr_buf: kzfree(k_align_src); if (rc) return rc; } return rc; } /* * AEAD algorithm self tests */ int _fips_qcrypto_aead_selftest(struct fips_selftest_data *selftest_d) { int rc = 0, err, tv_index, num_tv, authsize, buf_length; struct crypto_aead *tfm; struct aead_request *aead_req; struct _fips_completion fips_completion; struct scatterlist fips_sg, fips_assoc_sg; char *k_align_src = NULL; struct _fips_test_vector_aead tv_aead; num_tv = (sizeof(fips_test_vector_aead)) / (sizeof(struct _fips_test_vector_aead)); /* One-by-one testing */ for (tv_index = 0; tv_index < num_tv; tv_index++) { memcpy(&tv_aead, &fips_test_vector_aead[tv_index], (sizeof(struct _fips_test_vector_aead))); if (tv_aead.pln_txt_len > tv_aead.enc_txt_len) buf_length = tv_aead.pln_txt_len; else buf_length = tv_aead.enc_txt_len; /* Single buffer allocation for in place operation */ k_align_src = kzalloc(buf_length, GFP_KERNEL); if (k_align_src == NULL) { pr_err("qcrypto:, Failed to allocate memory for k_align_src %ld\n", PTR_ERR(k_align_src)); return -ENOMEM; } memcpy(&k_align_src[0], tv_aead.pln_txt, tv_aead.pln_txt_len); /* use_sw flags are set in dtsi file which makes default Linux API calls to go to s/w crypto instead of h/w crypto. This code makes sure that all selftests calls always go to h/w, independent of DTSI flags. 
*/ if (selftest_d->prefix_aead_algo) { if (_fips_get_alg_cra_name(tv_aead.mod_alg, selftest_d->algo_prefix, strlen(tv_aead.mod_alg))) { rc = -1; pr_err("Algo Name is too long for tv %d\n", tv_index); goto clr_buf; } } tfm = crypto_alloc_aead(tv_aead.mod_alg, 0, 0); if (IS_ERR(tfm)) { pr_err("qcrypto: %s algorithm not found\n", tv_aead.mod_alg); rc = -ENOMEM; goto clr_buf; } aead_req = aead_request_alloc(tfm, GFP_KERNEL); if (!aead_req) { pr_err("qcrypto:aead_request_alloc failed\n"); rc = -ENOMEM; goto clr_tfm; } rc = qcrypto_aead_set_device(aead_req, selftest_d->ce_device); if (rc != 0) { pr_err("%s qcrypto_cipher_set_device failed with err %d\n", __func__, rc); goto clr_aead_req; } init_completion(&fips_completion.completion); aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, _fips_cb, &fips_completion); crypto_aead_clear_flags(tfm, ~0); rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen); if (rc) { pr_err("qcrypto:crypto_aead_setkey failed\n"); goto clr_aead_req; } authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len); rc = crypto_aead_setauthsize(tfm, authsize); if (rc) { pr_err("qcrypto:crypto_aead_setauthsize failed\n"); goto clr_aead_req; } sg_init_one(&fips_sg, k_align_src, tv_aead.pln_txt_len + authsize); aead_request_set_crypt(aead_req, &fips_sg, &fips_sg, tv_aead.pln_txt_len , tv_aead.iv); sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen); aead_request_set_assoc(aead_req, &fips_assoc_sg, tv_aead.alen); /**** Encryption test ****/ rc = crypto_aead_encrypt(aead_req); if (rc == -EINPROGRESS || rc == -EBUSY) { rc = wait_for_completion_interruptible( &fips_completion.completion); err = fips_completion.err; if (!rc && !err) { INIT_COMPLETION(fips_completion.completion); } else { pr_err("qcrypto:aead:ENC, wait_for_completion failed\n"); goto clr_aead_req; } } if (memcmp(k_align_src, tv_aead.enc_txt, tv_aead.enc_txt_len)) { rc = -1; goto clr_aead_req; } /** Decryption test **/ init_completion(&fips_completion.completion); 
aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, _fips_cb, &fips_completion); crypto_aead_clear_flags(tfm, ~0); rc = crypto_aead_setkey(tfm, tv_aead.key, tv_aead.klen); if (rc) { pr_err("qcrypto:aead:DEC, crypto_aead_setkey failed\n"); goto clr_aead_req; } authsize = abs(tv_aead.enc_txt_len - tv_aead.pln_txt_len); rc = crypto_aead_setauthsize(tfm, authsize); if (rc) { pr_err("qcrypto:aead:DEC, crypto_aead_setauthsize failed\n"); goto clr_aead_req; } sg_init_one(&fips_sg, k_align_src, tv_aead.enc_txt_len + authsize); aead_request_set_crypt(aead_req, &fips_sg, &fips_sg, tv_aead.enc_txt_len, tv_aead.iv); sg_init_one(&fips_assoc_sg, tv_aead.assoc, tv_aead.alen); aead_request_set_assoc(aead_req, &fips_assoc_sg, tv_aead.alen); rc = crypto_aead_decrypt(aead_req); if (rc == -EINPROGRESS || rc == -EBUSY) { rc = wait_for_completion_interruptible( &fips_completion.completion); err = fips_completion.err; if (!rc && !err) { INIT_COMPLETION(fips_completion.completion); } else { pr_err("qcrypto:aead:DEC, wait_for_completion failed\n"); goto clr_aead_req; } } if (memcmp(k_align_src, tv_aead.pln_txt, tv_aead.pln_txt_len)) { rc = -1; goto clr_aead_req; } clr_aead_req: aead_request_free(aead_req); clr_tfm: crypto_free_aead(tfm); clr_buf: kzfree(k_align_src); /* In case of any failure, return error */ if (rc) return rc; } return rc; }
gpl-2.0
lplachno/mx6-dev
drivers/md/dm-cache-policy-mq.c
2136
28340
/* * Copyright (C) 2012 Red Hat. All rights reserved. * * This file is released under the GPL. */ #include "dm-cache-policy.h" #include "dm.h" #include <linux/hash.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/vmalloc.h> #define DM_MSG_PREFIX "cache-policy-mq" static struct kmem_cache *mq_entry_cache; /*----------------------------------------------------------------*/ static unsigned next_power(unsigned n, unsigned min) { return roundup_pow_of_two(max(n, min)); } /*----------------------------------------------------------------*/ static unsigned long *alloc_bitset(unsigned nr_entries) { size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG); return vzalloc(s); } static void free_bitset(unsigned long *bits) { vfree(bits); } /*----------------------------------------------------------------*/ /* * Large, sequential ios are probably better left on the origin device since * spindles tend to have good bandwidth. * * The io_tracker tries to spot when the io is in one of these sequential * modes. * * Two thresholds to switch between random and sequential io mode are defaulting * as follows and can be adjusted via the constructor and message interfaces. 
*/ #define RANDOM_THRESHOLD_DEFAULT 4 #define SEQUENTIAL_THRESHOLD_DEFAULT 512 enum io_pattern { PATTERN_SEQUENTIAL, PATTERN_RANDOM }; struct io_tracker { enum io_pattern pattern; unsigned nr_seq_samples; unsigned nr_rand_samples; unsigned thresholds[2]; dm_oblock_t last_end_oblock; }; static void iot_init(struct io_tracker *t, int sequential_threshold, int random_threshold) { t->pattern = PATTERN_RANDOM; t->nr_seq_samples = 0; t->nr_rand_samples = 0; t->last_end_oblock = 0; t->thresholds[PATTERN_RANDOM] = random_threshold; t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold; } static enum io_pattern iot_pattern(struct io_tracker *t) { return t->pattern; } static void iot_update_stats(struct io_tracker *t, struct bio *bio) { if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) t->nr_seq_samples++; else { /* * Just one non-sequential IO is enough to reset the * counters. */ if (t->nr_seq_samples) { t->nr_seq_samples = 0; t->nr_rand_samples = 0; } t->nr_rand_samples++; } t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); } static void iot_check_for_pattern_switch(struct io_tracker *t) { switch (t->pattern) { case PATTERN_SEQUENTIAL: if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) { t->pattern = PATTERN_RANDOM; t->nr_seq_samples = t->nr_rand_samples = 0; } break; case PATTERN_RANDOM: if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) { t->pattern = PATTERN_SEQUENTIAL; t->nr_seq_samples = t->nr_rand_samples = 0; } break; } } static void iot_examine_bio(struct io_tracker *t, struct bio *bio) { iot_update_stats(t, bio); iot_check_for_pattern_switch(t); } /*----------------------------------------------------------------*/ /* * This queue is divided up into different levels. Allowing us to push * entries to the back of any of the levels. Think of it as a partially * sorted queue. 
*/ #define NR_QUEUE_LEVELS 16u struct queue { struct list_head qs[NR_QUEUE_LEVELS]; }; static void queue_init(struct queue *q) { unsigned i; for (i = 0; i < NR_QUEUE_LEVELS; i++) INIT_LIST_HEAD(q->qs + i); } /* * Insert an entry to the back of the given level. */ static void queue_push(struct queue *q, unsigned level, struct list_head *elt) { list_add_tail(elt, q->qs + level); } static void queue_remove(struct list_head *elt) { list_del(elt); } /* * Shifts all regions down one level. This has no effect on the order of * the queue. */ static void queue_shift_down(struct queue *q) { unsigned level; for (level = 1; level < NR_QUEUE_LEVELS; level++) list_splice_init(q->qs + level, q->qs + level - 1); } /* * Gives us the oldest entry of the lowest popoulated level. If the first * level is emptied then we shift down one level. */ static struct list_head *queue_pop(struct queue *q) { unsigned level; struct list_head *r; for (level = 0; level < NR_QUEUE_LEVELS; level++) if (!list_empty(q->qs + level)) { r = q->qs[level].next; list_del(r); /* have we just emptied the bottom level? */ if (level == 0 && list_empty(q->qs)) queue_shift_down(q); return r; } return NULL; } static struct list_head *list_pop(struct list_head *lh) { struct list_head *r = lh->next; BUG_ON(!r); list_del_init(r); return r; } /*----------------------------------------------------------------*/ /* * Describes a cache entry. Used in both the cache and the pre_cache. */ struct entry { struct hlist_node hlist; struct list_head list; dm_oblock_t oblock; dm_cblock_t cblock; /* valid iff in_cache */ /* * FIXME: pack these better */ bool in_cache:1; unsigned hit_count; unsigned generation; unsigned tick; }; struct mq_policy { struct dm_cache_policy policy; /* protects everything */ struct mutex lock; dm_cblock_t cache_size; struct io_tracker tracker; /* * We maintain two queues of entries. The cache proper contains * the currently active mappings. 
Whereas the pre_cache tracks * blocks that are being hit frequently and potential candidates * for promotion to the cache. */ struct queue pre_cache; struct queue cache; /* * Keeps track of time, incremented by the core. We use this to * avoid attributing multiple hits within the same tick. * * Access to tick_protected should be done with the spin lock held. * It's copied to tick at the start of the map function (within the * mutex). */ spinlock_t tick_lock; unsigned tick_protected; unsigned tick; /* * A count of the number of times the map function has been called * and found an entry in the pre_cache or cache. Currently used to * calculate the generation. */ unsigned hit_count; /* * A generation is a longish period that is used to trigger some * book keeping effects. eg, decrementing hit counts on entries. * This is needed to allow the cache to evolve as io patterns * change. */ unsigned generation; unsigned generation_period; /* in lookups (will probably change) */ /* * Entries in the pre_cache whose hit count passes the promotion * threshold move to the cache proper. Working out the correct * value for the promotion_threshold is crucial to this policy. */ unsigned promote_threshold; /* * We need cache_size entries for the cache, and choose to have * cache_size entries for the pre_cache too. One motivation for * using the same size is to make the hit counts directly * comparable between pre_cache and cache. */ unsigned nr_entries; unsigned nr_entries_allocated; struct list_head free; /* * Cache blocks may be unallocated. We store this info in a * bitset. */ unsigned long *allocation_bitset; unsigned nr_cblocks_allocated; unsigned find_free_nr_words; unsigned find_free_last_word; /* * The hash table allows us to quickly find an entry by origin * block. Both pre_cache and cache entries are in here. 
*/ unsigned nr_buckets; dm_block_t hash_bits; struct hlist_head *table; }; /*----------------------------------------------------------------*/ /* Free/alloc mq cache entry structures. */ static void takeout_queue(struct list_head *lh, struct queue *q) { unsigned level; for (level = 0; level < NR_QUEUE_LEVELS; level++) list_splice(q->qs + level, lh); } static void free_entries(struct mq_policy *mq) { struct entry *e, *tmp; takeout_queue(&mq->free, &mq->pre_cache); takeout_queue(&mq->free, &mq->cache); list_for_each_entry_safe(e, tmp, &mq->free, list) kmem_cache_free(mq_entry_cache, e); } static int alloc_entries(struct mq_policy *mq, unsigned elts) { unsigned u = mq->nr_entries; INIT_LIST_HEAD(&mq->free); mq->nr_entries_allocated = 0; while (u--) { struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL); if (!e) { free_entries(mq); return -ENOMEM; } list_add(&e->list, &mq->free); } return 0; } /*----------------------------------------------------------------*/ /* * Simple hash table implementation. Should replace with the standard hash * table that's making its way upstream. */ static void hash_insert(struct mq_policy *mq, struct entry *e) { unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits); hlist_add_head(&e->hlist, mq->table + h); } static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock) { unsigned h = hash_64(from_oblock(oblock), mq->hash_bits); struct hlist_head *bucket = mq->table + h; struct entry *e; hlist_for_each_entry(e, bucket, hlist) if (e->oblock == oblock) { hlist_del(&e->hlist); hlist_add_head(&e->hlist, bucket); return e; } return NULL; } static void hash_remove(struct entry *e) { hlist_del(&e->hlist); } /*----------------------------------------------------------------*/ /* * Allocates a new entry structure. The memory is allocated in one lump, * so we just handing it out here. Returns NULL if all entries have * already been allocated. Cannot fail otherwise. 
*/ static struct entry *alloc_entry(struct mq_policy *mq) { struct entry *e; if (mq->nr_entries_allocated >= mq->nr_entries) { BUG_ON(!list_empty(&mq->free)); return NULL; } e = list_entry(list_pop(&mq->free), struct entry, list); INIT_LIST_HEAD(&e->list); INIT_HLIST_NODE(&e->hlist); mq->nr_entries_allocated++; return e; } /*----------------------------------------------------------------*/ /* * Mark cache blocks allocated or not in the bitset. */ static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock) { BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size)); BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset)); set_bit(from_cblock(cblock), mq->allocation_bitset); mq->nr_cblocks_allocated++; } static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock) { BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size)); BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset)); clear_bit(from_cblock(cblock), mq->allocation_bitset); mq->nr_cblocks_allocated--; } static bool any_free_cblocks(struct mq_policy *mq) { return mq->nr_cblocks_allocated < from_cblock(mq->cache_size); } /* * Fills result out with a cache block that isn't in use, or return * -ENOSPC. This does _not_ mark the cblock as allocated, the caller is * reponsible for that. 
*/ static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end, dm_cblock_t *result, unsigned *last_word) { int r = -ENOSPC; unsigned w; for (w = begin; w < end; w++) { /* * ffz is undefined if no zero exists */ if (mq->allocation_bitset[w] != ~0UL) { *last_word = w; *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w])); if (from_cblock(*result) < from_cblock(mq->cache_size)) r = 0; break; } } return r; } static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result) { int r; if (!any_free_cblocks(mq)) return -ENOSPC; r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word); if (r == -ENOSPC && mq->find_free_last_word) r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word); return r; } /*----------------------------------------------------------------*/ /* * Now we get to the meat of the policy. This section deals with deciding * when to to add entries to the pre_cache and cache, and move between * them. */ /* * The queue level is based on the log2 of the hit count. */ static unsigned queue_level(struct entry *e) { return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u); } /* * Inserts the entry into the pre_cache or the cache. Ensures the cache * block is marked as allocated if necc. Inserts into the hash table. Sets the * tick which records when the entry was last moved about. */ static void push(struct mq_policy *mq, struct entry *e) { e->tick = mq->tick; hash_insert(mq, e); if (e->in_cache) { alloc_cblock(mq, e->cblock); queue_push(&mq->cache, queue_level(e), &e->list); } else queue_push(&mq->pre_cache, queue_level(e), &e->list); } /* * Removes an entry from pre_cache or cache. Removes from the hash table. * Frees off the cache block if necc. 
*/ static void del(struct mq_policy *mq, struct entry *e) { queue_remove(&e->list); hash_remove(e); if (e->in_cache) free_cblock(mq, e->cblock); } /* * Like del, except it removes the first entry in the queue (ie. the least * recently used). */ static struct entry *pop(struct mq_policy *mq, struct queue *q) { struct entry *e = container_of(queue_pop(q), struct entry, list); if (e) { hash_remove(e); if (e->in_cache) free_cblock(mq, e->cblock); } return e; } /* * Has this entry already been updated? */ static bool updated_this_tick(struct mq_policy *mq, struct entry *e) { return mq->tick == e->tick; } /* * The promotion threshold is adjusted every generation. As are the counts * of the entries. * * At the moment the threshold is taken by averaging the hit counts of some * of the entries in the cache (the first 20 entries of the first level). * * We can be much cleverer than this though. For example, each promotion * could bump up the threshold helping to prevent churn. Much more to do * here. */ #define MAX_TO_AVERAGE 20 static void check_generation(struct mq_policy *mq) { unsigned total = 0, nr = 0, count = 0, level; struct list_head *head; struct entry *e; if ((mq->hit_count >= mq->generation_period) && (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) { mq->hit_count = 0; mq->generation++; for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) { head = mq->cache.qs + level; list_for_each_entry(e, head, list) { nr++; total += e->hit_count; if (++count >= MAX_TO_AVERAGE) break; } } mq->promote_threshold = nr ? total / nr : 1; if (mq->promote_threshold * nr < total) mq->promote_threshold++; } } /* * Whenever we use an entry we bump up it's hit counter, and push it to the * back to it's current level. */ static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e) { if (updated_this_tick(mq, e)) return; e->hit_count++; mq->hit_count++; check_generation(mq); /* generation adjustment, to stop the counts increasing forever. 
*/ /* FIXME: divide? */ /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */ e->generation = mq->generation; del(mq, e); push(mq, e); } /* * Demote the least recently used entry from the cache to the pre_cache. * Returns the new cache entry to use, and the old origin block it was * mapped to. * * We drop the hit count on the demoted entry back to 1 to stop it bouncing * straight back into the cache if it's subsequently hit. There are * various options here, and more experimentation would be good: * * - just forget about the demoted entry completely (ie. don't insert it into the pre_cache). * - divide the hit count rather that setting to some hard coded value. * - set the hit count to a hard coded value other than 1, eg, is it better * if it goes in at level 2? */ static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock) { dm_cblock_t result; struct entry *demoted = pop(mq, &mq->cache); BUG_ON(!demoted); result = demoted->cblock; *oblock = demoted->oblock; demoted->in_cache = false; demoted->hit_count = 1; push(mq, demoted); return result; } /* * We modify the basic promotion_threshold depending on the specific io. * * If the origin block has been discarded then there's no cost to copy it * to the cache. * * We bias towards reads, since they can be demoted at no cost if they * haven't been dirtied. */ #define DISCARDED_PROMOTE_THRESHOLD 1 #define READ_PROMOTE_THRESHOLD 4 #define WRITE_PROMOTE_THRESHOLD 8 static unsigned adjusted_promote_threshold(struct mq_policy *mq, bool discarded_oblock, int data_dir) { if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE) /* * We don't need to do any copying at all, so give this a * very low threshold. In practice this only triggers * during initial population after a format. */ return DISCARDED_PROMOTE_THRESHOLD; return data_dir == READ ? 
(mq->promote_threshold + READ_PROMOTE_THRESHOLD) : (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD); } static bool should_promote(struct mq_policy *mq, struct entry *e, bool discarded_oblock, int data_dir) { return e->hit_count >= adjusted_promote_threshold(mq, discarded_oblock, data_dir); } static int cache_entry_found(struct mq_policy *mq, struct entry *e, struct policy_result *result) { requeue_and_update_tick(mq, e); if (e->in_cache) { result->op = POLICY_HIT; result->cblock = e->cblock; } return 0; } /* * Moves and entry from the pre_cache to the cache. The main work is * finding which cache block to use. */ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e, struct policy_result *result) { dm_cblock_t cblock; if (find_free_cblock(mq, &cblock) == -ENOSPC) { result->op = POLICY_REPLACE; cblock = demote_cblock(mq, &result->old_oblock); } else result->op = POLICY_NEW; result->cblock = e->cblock = cblock; del(mq, e); e->in_cache = true; push(mq, e); return 0; } static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e, bool can_migrate, bool discarded_oblock, int data_dir, struct policy_result *result) { int r = 0; bool updated = updated_this_tick(mq, e); requeue_and_update_tick(mq, e); if ((!discarded_oblock && updated) || !should_promote(mq, e, discarded_oblock, data_dir)) result->op = POLICY_MISS; else if (!can_migrate) r = -EWOULDBLOCK; else r = pre_cache_to_cache(mq, e, result); return r; } static void insert_in_pre_cache(struct mq_policy *mq, dm_oblock_t oblock) { struct entry *e = alloc_entry(mq); if (!e) /* * There's no spare entry structure, so we grab the least * used one from the pre_cache. 
*/ e = pop(mq, &mq->pre_cache); if (unlikely(!e)) { DMWARN("couldn't pop from pre cache"); return; } e->in_cache = false; e->oblock = oblock; e->hit_count = 1; e->generation = mq->generation; push(mq, e); } static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock, struct policy_result *result) { struct entry *e; dm_cblock_t cblock; if (find_free_cblock(mq, &cblock) == -ENOSPC) { result->op = POLICY_MISS; insert_in_pre_cache(mq, oblock); return; } e = alloc_entry(mq); if (unlikely(!e)) { result->op = POLICY_MISS; return; } e->oblock = oblock; e->cblock = cblock; e->in_cache = true; e->hit_count = 1; e->generation = mq->generation; push(mq, e); result->op = POLICY_NEW; result->cblock = e->cblock; } static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock, bool can_migrate, bool discarded_oblock, int data_dir, struct policy_result *result) { if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) { if (can_migrate) insert_in_cache(mq, oblock, result); else return -EWOULDBLOCK; } else { insert_in_pre_cache(mq, oblock); result->op = POLICY_MISS; } return 0; } /* * Looks the oblock up in the hash table, then decides whether to put in * pre_cache, or cache etc. */ static int map(struct mq_policy *mq, dm_oblock_t oblock, bool can_migrate, bool discarded_oblock, int data_dir, struct policy_result *result) { int r = 0; struct entry *e = hash_lookup(mq, oblock); if (e && e->in_cache) r = cache_entry_found(mq, e, result); else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL) result->op = POLICY_MISS; else if (e) r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock, data_dir, result); else r = no_entry_found(mq, oblock, can_migrate, discarded_oblock, data_dir, result); if (r == -EWOULDBLOCK) result->op = POLICY_MISS; return r; } /*----------------------------------------------------------------*/ /* * Public interface, via the policy struct. See dm-cache-policy.h for a * description of these. 
*/ static struct mq_policy *to_mq_policy(struct dm_cache_policy *p) { return container_of(p, struct mq_policy, policy); } static void mq_destroy(struct dm_cache_policy *p) { struct mq_policy *mq = to_mq_policy(p); free_bitset(mq->allocation_bitset); kfree(mq->table); free_entries(mq); kfree(mq); } static void copy_tick(struct mq_policy *mq) { unsigned long flags; spin_lock_irqsave(&mq->tick_lock, flags); mq->tick = mq->tick_protected; spin_unlock_irqrestore(&mq->tick_lock, flags); } static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock, bool can_block, bool can_migrate, bool discarded_oblock, struct bio *bio, struct policy_result *result) { int r; struct mq_policy *mq = to_mq_policy(p); result->op = POLICY_MISS; if (can_block) mutex_lock(&mq->lock); else if (!mutex_trylock(&mq->lock)) return -EWOULDBLOCK; copy_tick(mq); iot_examine_bio(&mq->tracker, bio); r = map(mq, oblock, can_migrate, discarded_oblock, bio_data_dir(bio), result); mutex_unlock(&mq->lock); return r; } static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock) { int r; struct mq_policy *mq = to_mq_policy(p); struct entry *e; if (!mutex_trylock(&mq->lock)) return -EWOULDBLOCK; e = hash_lookup(mq, oblock); if (e && e->in_cache) { *cblock = e->cblock; r = 0; } else r = -ENOENT; mutex_unlock(&mq->lock); return r; } static int mq_load_mapping(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t cblock, uint32_t hint, bool hint_valid) { struct mq_policy *mq = to_mq_policy(p); struct entry *e; e = alloc_entry(mq); if (!e) return -ENOMEM; e->cblock = cblock; e->oblock = oblock; e->in_cache = true; e->hit_count = hint_valid ? 
		hint : 1;
	e->generation = mq->generation;
	push(mq, e);

	return 0;
}

/*
 * Invoke fn for every in-cache entry, walking the cache queue level by
 * level.  Stops early (and returns fn's result) on the first non-zero
 * return.
 */
static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;
	struct entry *e;
	unsigned level;

	mutex_lock(&mq->lock);
	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each_entry(e, &mq->cache.qs[level], list) {
			r = fn(context, e->cblock, e->oblock, e->hit_count);
			if (r)
				goto out;
		}

out:
	mutex_unlock(&mq->lock);

	return r;
}

/*
 * Demote an entry from the cache back to the pre-cache.  The entry must
 * exist and be in-cache (BUG otherwise).  Caller holds mq->lock.
 */
static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e = hash_lookup(mq, oblock);

	BUG_ON(!e || !e->in_cache);

	del(mq, e);
	e->in_cache = false;
	push(mq, e);
}

/* Locked wrapper around remove_mapping(). */
static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}

/*
 * Rebind an in-cache entry to a new origin block, keeping its cblock and
 * statistics.  Caller holds mq->lock.
 */
static void force_mapping(struct mq_policy *mq,
			  dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	BUG_ON(!e || !e->in_cache);

	del(mq, e);
	e->oblock = new_oblock;
	push(mq, e);
}

/* Locked wrapper around force_mapping(). */
static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}

/* Number of cache blocks currently allocated. */
static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	/* FIXME: lock mutex, not sure we can block here */
	return to_cblock(mq->nr_cblocks_allocated);
}

/*
 * Called from interrupt-ish context; bumps the protected tick counter,
 * which copy_tick() later folds into mq->tick under mq->lock.
 */
static void mq_tick(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

/*
 * Set one of the two tunables ("random_threshold" /
 * "sequential_threshold") from its decimal string value.
 * Returns -EINVAL for unknown keys or unparseable values.
 * NOTE(review): mq->lock is not taken for this store — confirm callers
 * serialise configuration changes.
 */
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	enum io_pattern pattern;
	unsigned long tmp;

	if (!strcasecmp(key, "random_threshold"))
		pattern
			= PATTERN_RANDOM;
	else if (!strcasecmp(key, "sequential_threshold"))
		pattern = PATTERN_SEQUENTIAL;
	else
		return -EINVAL;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	mq->tracker.thresholds[pattern] = tmp;

	return 0;
}

/*
 * Emit the current tunables in "<count> <key> <value> ..." form.
 * NOTE(review): sz/maxlen are only used inside the DMEMIT macro; output
 * is silently truncated if it exceeds maxlen.
 */
static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
				 unsigned maxlen)
{
	ssize_t sz = 0;	/* consumed by DMEMIT */
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("4 random_threshold %u sequential_threshold %u",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL]);

	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.writeback_work = NULL;	/* mq does no writeback */
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}

/*
 * Allocate and initialise an mq policy instance.  origin_size and
 * cache_block_size are part of the policy-type create() signature but
 * unused here.  Returns NULL on any allocation failure (the goto chain
 * at the end of the function unwinds partial allocations).
 */
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	int r;
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);

	mq->cache_size = cache_size;
	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->promote_threshold = 0;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);
	mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
	mq->find_free_last_word = 0;

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache);
	/* demote a generation at least every 1024 hits */
	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

	/* one entry per cache block, plus as many again for the pre-cache */
	mq->nr_entries = 2 * from_cblock(cache_size);
	r = alloc_entries(mq, mq->nr_entries);
	if (r)
		goto bad_cache_alloc;

	mq->nr_entries_allocated = 0;
	mq->nr_cblocks_allocated = 0;

	/* hash table sized to roughly half the cache blocks, min 16 */
	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = ffs(mq->nr_buckets) - 1;
	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
	if (!mq->table)
		goto bad_alloc_table;

	mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
	if (!mq->allocation_bitset)
		goto bad_alloc_bitset;

	return &mq->policy;

bad_alloc_bitset:
	kfree(mq->table);
bad_alloc_table:
	free_entries(mq);
bad_cache_alloc:
	kfree(mq);

	return NULL;
}

/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

/* "default" is an alias registered so mq is picked when none is named. */
static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {1, 0, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

/*
 * Module init: create the entry slab cache, then register both the "mq"
 * and "default" policy types.
 * NOTE(review): every failure path returns -ENOMEM, even when
 * dm_cache_policy_register() failed with a different code — consider
 * propagating r instead.
 */
static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		goto bad;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		goto bad_register_mq;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (!r) {
		DMINFO("version %u.%u.%u loaded",
		       mq_policy_type.version[0],
		       mq_policy_type.version[1],
		       mq_policy_type.version[2]);
		return 0;
	}

	DMERR("register failed (as default) %d", r);
	dm_cache_policy_unregister(&mq_policy_type);

bad_register_mq:
	kmem_cache_destroy(mq_entry_cache);
bad:
	return -ENOMEM;
}

static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);

	kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");
gpl-2.0
Team-Hydra/android_kernel_moto_shamu
drivers/gpu/drm/ttm/ttm_bo_vm.c
2136
11581
/************************************************************************** * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

/* number of pages speculatively inserted per fault */
#define TTM_BO_VM_NUM_PREFAULT 16

/*
 * Find the buffer object whose VM node covers [page_start,
 * page_start + num_pages) in the device address-space rbtree.  Returns
 * NULL if no node contains the whole range.  Caller must hold
 * bdev->vm_lock (at least for read).
 */
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	/*
	 * best_bo tracks the rightmost node whose start is <= page_start;
	 * an exact match ends the descent early.
	 */
	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	/* candidate must span the entire requested range */
	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}

/*
 * Page-fault handler for TTM-backed VMAs.  Reserves the bo, waits out
 * pipelined moves, fixes up vm_page_prot caching bits and prefaults up
 * to TTM_BO_VM_NUM_PREFAULT pages.
 */
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
			/* fall through: -EBUSY is retried like -ERESTARTSYS */
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/* translate the faulting address into a bo-relative page index */
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = vma_pages(vma) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

		/* Allocate all page at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				/* a hole past the first page just ends the prefault */
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}

/* vma duplicated (fork/mremap): take an extra bo reference. */
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

/* vma torn down: drop the reference held via vm_private_data. */
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};

/*
 * mmap() entry point for TTM devices.  Looks up the bo covering the
 * requested offset, verifies access rights via the driver, and wires up
 * ttm_bo_vm_ops.  The bo reference taken here is transferred to
 * vma->vm_private_data and dropped in ttm_bo_vm_close().
 */
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, vma_pages(vma));
	/* bo may be mid-destruction; only keep it if the kref is still live */
	if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
		bo = NULL;
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		pr_err("Could not find buffer object to map\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

/*
 * mmap() helper for fbdev emulation: maps a single known bo at offset 0.
 * Takes its own bo reference, released in ttm_bo_vm_close().
 */
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

/*
 * read()/write()-style access to a bo through the device address space.
 * Transfers at most the remainder of the bo from *f_pos onward; returns
 * the byte count transferred, or a negative errno.
 */
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	/* clamp the transfer to what is left of the bo past *f_pos */
	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	/* ret is the number of bytes NOT copied (copy_*_user convention) */
	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

/*
 * Same as ttm_bo_io() but for a caller-supplied fbdev bo with no device
 * address-space lookup.
 * NOTE(review): this path calls ttm_bo_unref(&bo) without having taken a
 * reference of its own — confirm the caller donates a reference.
 */
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
gpl-2.0
chruck/cpsc8220
linux-4.3.3/drivers/tty/hvc/hvc_rtas.c
2136
3401
/*
 * IBM RTAS driver interface to hvc_console.c
 *
 * (C) Copyright IBM Corporation 2001-2005
 * (C) Copyright Red Hat, Inc. 2005
 *
 * Author(s): Maximino Augilar <IBM STI Design Center>
 *	    : Ryan S. Arnold <rsa@us.ibm.com>
 *	    : Utz Bacher <utz.bacher@de.ibm.com>
 *	    : David Woodhouse <dwmw2@infradead.org>
 *
 *    inspired by drivers/char/hvc_console.c
 *    written by Anton Blanchard and Paul Mackerras
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/console.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>

#include <asm/irq.h>
#include <asm/rtas.h>
#include "hvc_console.h"

/* arbitrary magic identifying this backend's vterm to hvc_console */
#define hvc_rtas_cookie 0x67781e15
struct hvc_struct *hvc_rtas_dev;

/* RTAS service tokens, resolved lazily from the device tree */
static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE;
static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE;

/*
 * Push up to count bytes to the RTAS console, one rtas_call per
 * character.  Returns the number of characters actually accepted
 * (stops at the first rtas_call failure).
 */
static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
		int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[i]))
			break;
	}

	return i;
}

/*
 * Pull up to count bytes from the RTAS console.  Returns the number of
 * characters read; stops as soon as RTAS reports no more data.
 */
static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
{
	int i, c;

	for (i = 0; i < count; i++) {
		if (rtas_call(rtascons_get_char_token, 0, 2, &c))
			break;

		buf[i] = c;
	}

	return i;
}

static const struct hv_ops hvc_rtas_get_put_ops = {
	.get_chars = hvc_rtas_read_console,
	.put_chars = hvc_rtas_write_console,
};

/*
 * Device-level init: resolve the RTAS tokens (unless the console-init
 * path already did) and allocate the hvc device for the vterm that
 * hvc_rtas_console_init() instantiated earlier.
 */
static int __init hvc_rtas_init(void)
{
	struct hvc_struct *hp;

	if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
		rtascons_put_char_token = rtas_token("put-term-char");
	if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
		return -EIO;

	if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
		rtascons_get_char_token = rtas_token("get-term-char");
	if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
		return -EIO;

	BUG_ON(hvc_rtas_dev);

	/* Allocate an hvc_struct for the console device we instantiated
	 * earlier.  Save off hp so that we can return it on exit */
	hp = hvc_alloc(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops, 16);
	if (IS_ERR(hp))
		return PTR_ERR(hp);

	hvc_rtas_dev = hp;

	return 0;
}
device_initcall(hvc_rtas_init);

/* This will happen prior to module init.  There is no tty at this time? */
static int __init hvc_rtas_console_init(void)
{
	rtascons_put_char_token = rtas_token("put-term-char");
	if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
		return -EIO;

	rtascons_get_char_token = rtas_token("get-term-char");
	if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
		return -EIO;

	/* register the vterm so early boot printk reaches the console */
	hvc_instantiate(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops);
	add_preferred_console("hvc", 0, NULL);

	return 0;
}
console_initcall(hvc_rtas_console_init);
gpl-2.0
Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary
drivers/scsi/fnic/fnic_scsi.c
2136
68543
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"

/* Human-readable names for the fnic state machine (for debug logging). */
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

/* Human-readable names for per-IO request states. */
static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};

/*
 * Human-readable names for firmware FCPIO completion codes.  The enum is
 * sparse (0x41 onwards), so unnamed slots are NULL and handled by the
 * bounds/NULL check in fnic_fcpio_status_to_str().
 */
static const char *fcpio_status_str[] = {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	/* fixed: string previously had a stray trailing ']' */
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	/* fixed: was misspelled "FCPIO_LUNHMAP_CHNG_PEND" */
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};

/* Map a fnic state to its name; "unknown" for out-of-range values. */
const char *fnic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
		return "unknown";

	return fnic_state_str[state];
}

/* Map an IO request state to its name; "unknown" for out-of-range values. */
static const char *fnic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
	    !fnic_ioreq_state_str[state])
		return "unknown";

	return fnic_ioreq_state_str[state];
}

/* Map an FCPIO status to its name; "unknown" for holes/out-of-range. */
static const char *fnic_fcpio_status_to_str(unsigned int status)
{
	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
		return "unknown";

	return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);

/*
 * Per-IO spinlock, chosen by hashing the SCSI request tag into the
 * FNIC_IO_LOCKS-sized lock array (FNIC_IO_LOCKS must be a power of two).
 */
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}

/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 PCI_DMA_TODEVICE);
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}


/**
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 *
 * If clearbits is non-zero the bits in st_flags are cleared, otherwise
 * they are set.  Takes the host lock unless the caller already holds it.
 * NOTE(review): the spin_is_locked() probe cannot tell whether *this*
 * CPU holds the lock — confirm callers' locking discipline makes this
 * safe.
 **/
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
			unsigned long clearbits)
{
	struct Scsi_Host *host = fnic->lport->host;
	int sh_locked = spin_is_locked(host->host_lock);
	unsigned long flags = 0;

	if (!sh_locked)
		spin_lock_irqsave(host->host_lock, flags);

	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	if (!sh_locked)
		spin_unlock_irqrestore(host->host_lock, flags);

	return;
}


/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* indicate fwreset to io path */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl
	 * NOTE(review): schedule_timeout() without setting the task state
	 * returns immediately, making this a busy yield loop — confirm
	 * that is intended.
	 */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret)
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	else {
		/* reset was never queued; clear the in-reset flag again */
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}

	return ret;
}


/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	/* broadcast gw MAC when the destination is not yet known */
	if (fnic->ctlr.map_dest) {
		memset(gw_mac, 0xff, ETH_ALEN);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}

/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	u8 pri_tag = 0;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;
	char msg[2];

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		/* NOTE(review): pci_map_single result is not checked for
		 * mapping errors here or below — confirm acceptable. */
		io_req->sgl_list_pa = pci_map_single
			(fnic->pdev,
			 io_req->sgl_list,
			 sizeof(io_req->sgl_list[0]) * sg_count,
			 PCI_DMA_TODEVICE);
	}

	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);

	int_to_scsilun(sc->device->lun, &fc_lun);

	pri_tag = FCPIO_ICMND_PTA_SIMPLE;
	msg[0] = MSG_SIMPLE_TAG;
	scsi_populate_tag_msg(sc, msg);
	if (msg[0] == MSG_ORDERED_TAG)
		pri_tag = FCPIO_ICMND_PTA_ORDERED;

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags =
			FCPIO_ICMND_WRDATA;

	exch_flags = 0;
	/* request sequence-level retry only when both sides support it */
	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 pri_tag, /* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}

/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp = shost_priv(sc->device->host);
	struct fc_rport *rport;
	struct fnic_io_req *io_req = NULL;
	struct fnic *fnic = lport_priv(lp);
	struct vnic_wq_copy *wq;
	int ret;
	u64 cmd_trace;
	int sg_count = 0;
	unsigned long flags;
	unsigned long ptr;

	/* IO is blocked during fw reset; let the midlayer retry */
	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
		return SCSI_MLQUEUE_HOST_BUSY;

	rport = starget_to_rport(scsi_target(sc->device));
	ret = fc_remote_port_chkready(rport);
	if (ret) {
		/* rport not ready: complete the command with that status */
		sc->result = ret;
		done(sc);
		return 0;
	}

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	atomic_inc(&fnic->in_flight);

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);
	CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	/* Get a new io_req for this SCSI IO */
	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		/* NOTE(review): ret is still 0 here, so this path returns
		 * success without calling done(sc) — confirm whether
		 * SCSI_MLQUEUE_HOST_BUSY was intended. */
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			  sc->request->tag, sc, 0, sc->cmnd[0],
			  sg_count, CMD_STATE(sc));
		mempool_free(io_req, fnic->io_req_pool);
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC);
		if (!io_req->sgl_list) {
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			/* round up to the hardware's required alignment */
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}

	/* initialize rest of io_req */
	io_req->port_id = rport->port_id;
	io_req->start_time = jiffies;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_SP(sc) = (char *)io_req;
	CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
	sc->scsi_done = done;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
*/ spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc); FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, sc->request->tag, sc, 0, 0, 0, (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); CMD_SP(sc) = NULL; CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE; spin_unlock_irqrestore(io_lock, flags); if (io_req) { fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); } } else { /* REVISIT: Use per IO lock in the final code */ CMD_FLAGS(sc) |= FNIC_IO_ISSUED; } out: cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 | (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]); FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, sc->request->tag, sc, io_req, sg_count, cmd_trace, (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); atomic_dec(&fnic->in_flight); /* acquire host lock before returning to SCSI */ spin_lock(lp->host->host_lock); return ret; } DEF_SCSI_QCMD(fnic_queuecommand) /* * fnic_fcpio_fw_reset_cmpl_handler * Routine to handle fw reset completion */ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, struct fcpio_fw_req *desc) { u8 type; u8 hdr_status; struct fcpio_tag tag; int ret = 0; unsigned long flags; fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); /* Clean up all outstanding io requests */ fnic_cleanup_io(fnic, SCSI_NO_TAG); spin_lock_irqsave(&fnic->fnic_lock, flags); /* fnic should be in FC_TRANS_ETH_MODE */ if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { /* Check status of reset completion */ if (!hdr_status) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "reset cmpl success\n"); /* Ready to send flogi out */ fnic->state = FNIC_IN_ETH_MODE; } else { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic fw_reset : failed %s\n", fnic_fcpio_status_to_str(hdr_status)); /* * Unable to change to eth mode, cannot send out flogi * Change state to fc mode, so that subsequent Flogi * 
 requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fnic_flush_tx(fnic);

reset_cmpl_handler_end:
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

	return ret;
}

/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 *
 * Moves the state machine from ETH_TRANS_FC_MODE to FC_MODE on success,
 * or back to ETH_MODE on failure. On success, flushes queued tx frames
 * and kicks the frame work queue. Returns 0 on success, -1 otherwise.
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flog reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg :failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}

/*
 * Check whether a firmware-reported request_out index lies inside the
 * live [to_clean_index, to_use_index) window of the copy WQ ring.
 * The two branches handle the non-wrapped and wrapped ring cases.
 * Returns 1 when in range, 0 for a stale index.
 */
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index) {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index ||
		    request_out >= wq->to_use_index)
			return 0;
	} else {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index &&
		    request_out >= wq->to_use_index)
			return 0;
	}
	/* request_out index is in range */
	return 1;
}

/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;
	u64 *ox_id_tag = (u64 *)(void *)desc;

	/* mark the ack state */
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	}
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	FNIC_TRACE(fnic_fcpio_ack_handler,
		  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
		  ox_id_tag[4], ox_id_tag[5]);
}

/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 *
 * Looks up the SCSI command by firmware tag, and (under the per-tag
 * io_lock) translates the FCPIO completion status into a SCSI result,
 * releases the io_req, and completes the command to the midlayer.
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					 struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;
	u64 cmd_trace;
	unsigned long start_time;

	/* Decode the cmpl description to get the io_req id */
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);
	icmnd_cmpl = &desc->u.icmnd_cmpl;

	/* Reject tags outside the valid IO range */
	if (id >= FNIC_MAX_IO_REQ) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl sc is null - "
			  "hdr status = %s tag = 0x%x desc = 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, desc);
		FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
			  fnic->lport->host->host_no, id,
			  ((u64)icmnd_cmpl->_resvd0[1] << 16 |
			  (u64)icmnd_cmpl->_resvd0[0]),
			  ((u64)hdr_status << 16 |
			  (u64)icmnd_cmpl->scsi_status << 8 |
			  (u64)icmnd_cmpl->flags), desc,
			  (u64)icmnd_cmpl->residual, 0);
		return;
	}

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			CMD_FLAGS(sc) |= FNIC_IO_DONE;
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "icmnd_cmpl ABTS pending hdr status = %s "
				      "sc 0x%p scsi_status %x residual %d\n",
				      fnic_fcpio_status_to_str(hdr_status), sc,
				      icmnd_cmpl->scsi_status,
				      icmnd_cmpl->residual);
			break;
		case FCPIO_ABORTED:
			CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
			break;
		default:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				      "icmnd_cmpl abts pending "
				      "hdr status = %s tag = 0x%x sc = 0x%p\n",
				      fnic_fcpio_status_to_str(hdr_status),
				      id, sc);
			break;
		}
		return;
	}

	/* Mark the IO as complete */
	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

	/* Translate the FCPIO header status into a SCSI midlayer result */
	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);
		scsi_set_resid(sc, icmnd_cmpl->residual);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
			xfer_len -= icmnd_cmpl->residual;

		/*
		 * If queue_full, then try to reduce queue depth for all
		 * LUNS on the target. Todo: this should be accompanied
		 * by a periodic queue_depth rampup based on successful
		 * IO completion.
		 */
		if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
			struct scsi_device *t_sdev;
			int qd = 0;

			shost_for_each_device(t_sdev, sc->device->host) {
				if (t_sdev->id != sc->device->id)
					continue;

				if (t_sdev->queue_depth > 1) {
					qd = scsi_track_queue_full
						(t_sdev,
						 t_sdev->queue_depth - 1);
					if (qd == -1)
						qd = t_sdev->host->cmd_per_lun;
					shost_printk(KERN_INFO,
						     fnic->lport->host,
						     "scsi[%d:%d:%d:%d"
						     "] queue full detected,"
						     "new depth = %d\n",
						     t_sdev->host->host_no,
						     t_sdev->channel,
						     t_sdev->id, t_sdev->lun,
						     t_sdev->queue_depth);
				}
			}
		}
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;
	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
	case FCPIO_FW_ERR:           /* request was terminated due fw error */
	default:
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}

	/* Break link with the SCSI command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= FNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

	/* Pack status and CDB bytes into one u64 for the trace ring */
	cmd_trace = ((u64)hdr_status << 56) |
		  (u64)icmnd_cmpl->scsi_status << 48 |
		  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

	FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
		  sc->device->host->host_no, id, sc,
		  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
		  (u64)icmnd_cmpl->_resvd0[0] << 48 |
		  jiffies_to_msecs(jiffies - start_time)),
		  desc, cmd_trace,
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	/* Update host byte-count statistics by data direction */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else
		fnic->lport->host_stats.fcp_control_requests++;

	/* Call SCSI completion function to complete the IO */
	if (sc->scsi_done)
		sc->scsi_done(sc);
}

/* fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 *
 * Handles task-management completions: abort (FNIC_TAG_ABORT), device
 * reset (FNIC_TAG_DEV_RST), or both (abort of a device-reset request),
 * distinguished by the tag's flag bits. All state updates happen under
 * the per-tag io_lock.
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			  fnic_fcpio_status_to_str(hdr_status), id);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT : Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it.
 IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			CMD_SP(sc) = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			if (sc->scsi_done) {
				FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
					sc->device->host->host_no, id,
					sc,
					jiffies_to_msecs(jiffies - start_time),
					desc,
					(((u64)hdr_status << 40) |
					(u64)sc->cmnd[0] << 32 |
					(u64)sc->cmnd[2] << 24 |
					(u64)sc->cmnd[3] << 16 |
					(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
					(((u64)CMD_FLAGS(sc) << 32) |
					CMD_STATE(sc)));
				sc->scsi_done(sc);
			}
		}

	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		CMD_LR_STATUS(sc) = hdr_status;
		/* An abort of the reset is already in flight: defer to it */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"Terminate pending "
				"dev reset cmpl recd. id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"dev reset cmpl recd after time out. "
				"id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
		spin_unlock_irqrestore(io_lock, flags);
	}

}

/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 *
 * Dispatches a firmware completion descriptor to the matching handler
 * based on its header type. Always returns 0 (required by the
 * vnic_cq_copy_service() callback contract).
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
				   unsigned int cq_index,
				   struct fcpio_fw_req *desc)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	switch (desc->hdr.type) {
	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
		fnic_fcpio_ack_handler(fnic, cq_index, desc);
		break;

	case FCPIO_ICMND_CMPL: /* fw completed a command */
		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
		break;

	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
		break;

	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
		fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
		break;

	case FCPIO_RESET_CMPL: /* fw completed reset */
		fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
		break;

	default:
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "firmware completion type %d\n",
			      desc->hdr.type);
		break;
	}

	return 0;
}

/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 *
 * Services every copy-WQ completion queue, handing each descriptor to
 * fnic_fcpio_cmpl_handler(). Returns the total number of descriptors
 * processed across all queues.
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i, cq_index;
	unsigned int cur_work_done;

	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = i + fnic->raw_wq_count + fnic->rq_count;
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
		wq_work_done += cur_work_done;
	}
	return wq_work_done;
}

/*
 * Walk every possible IO tag (except exclude_id) and fail outstanding
 * commands back to the midlayer with DID_TRANSPORT_DISRUPTED; used after
 * a firmware reset when no per-IO completions will arrive.
 */
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
	unsigned int i;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	struct scsi_cmnd *sc;
	spinlock_t *io_lock;
	unsigned long
start_time = 0;

	for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
		if (i == exclude_id)
			continue;

		sc = scsi_host_find_tag(fnic->lport->host, i);
		if (!sc)
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
			/*
			 * We will be here only when FW completes reset
			 * without sending completions for outstanding ios.
			 */
			CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
			if (io_req && io_req->dr_done)
				complete(io_req->dr_done);
			else if (io_req && io_req->abts_done)
				complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto cleanup_scsi_cmd;
		}

		CMD_SP(sc) = NULL;

		spin_unlock_irqrestore(io_lock, flags);

		/*
		 * If there is a scsi_cmnd associated with this io_req, then
		 * free the corresponding state
		 */
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
			      " DID_TRANSPORT_DISRUPTED\n");

		/*
		 * Complete the command to SCSI.
		 * NOTE(review): on the !io_req path start_time keeps its
		 * previous value (0 on first iteration), so the traced
		 * duration below may be meaningless for those tags.
		 */
		if (sc->scsi_done) {
			FNIC_TRACE(fnic_cleanup_io,
				  sc->device->host->host_no, i, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  0, ((u64)sc->cmnd[0] << 32 |
				  (u64)sc->cmnd[2] << 24 |
				  (u64)sc->cmnd[3] << 16 |
				  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

			sc->scsi_done(sc);
		}
	}
}

/*
 * Called for a descriptor the firmware never processed (WQ cleanup):
 * releases the associated io_req and completes the SCSI command with
 * DID_NO_CONNECT.
 */
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time = 0;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	if (id >= FNIC_MAX_IO_REQ)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* Get the IO context which this desc refers to */
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wq_copy_cleanup_scsi_cmd;
	}

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	start_time = io_req->start_time;
	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	if (sc->scsi_done) {
		FNIC_TRACE(fnic_wq_copy_cleanup_handler,
			  sc->device->host->host_no, id, sc,
			  jiffies_to_msecs(jiffies - start_time),
			  0, ((u64)sc->cmnd[0] << 32 |
			  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
			  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
			  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

		sc->scsi_done(sc);
	}
}

/*
 * Queue an abort/terminate ITMF request to firmware for the given tag.
 * Bumps fnic->in_flight for the duration of the enqueue. Returns 0 on
 * success, 1 when IO is blocked or no WQ descriptors are available.
 */
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, flags);
		return 1;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	/* Reclaim acked descriptors when the WQ is running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		atomic_dec(&fnic->in_flight);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_queue_abort_io_req: failure: no descriptors\n");
		return 1;
	}
	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun,
io_req->port_id, fnic->config.ra_tov,
				     fnic->config.ed_tov);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	atomic_dec(&fnic->in_flight);

	return 0;
}

/*
 * Terminate all IOs still pending with firmware that belong to the rport
 * identified by port_id (e.g. after the rport went away). Issues an
 * ABT_TASK_TERM for each matching tag; on queue failure the command
 * state is reverted so scsi_eh or LUN reset can clean it up later.
 */
static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	int tag;
	int abt_tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	if (fnic->in_remove)
		return;

	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
		abt_tag = tag;
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc)
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || io_req->port_id != port_id) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_rport_exch_reset: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}

		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			shost_printk(KERN_ERR, fnic->lport->host,
				  "rport_exch_reset "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_rport_exch_reset dev rst sc 0x%p\n",
			sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_rport_reset_exch: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
		}
	}
}

/*
 * Terminate all IOs pending with firmware that were issued through the
 * given rport. Structure mirrors fnic_rport_exch_reset(), but matching
 * is done by comparing the command's rport pointer rather than port_id.
 */
void fnic_terminate_rport_io(struct fc_rport *rport)
{
	int tag;
	int abt_tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct fc_rport_libfc_priv *rdata = rport->dd_data;
	struct fc_lport *lport = rdata->local_port;
	struct fnic *fnic = lport_priv(lport);
	struct fc_rport *cmd_rport;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host, "fnic_terminate_rport_io called"
		      " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
		      rport->port_name, rport->node_name, rport,
		      rport->port_id);

	if (fnic->in_remove)
		return;

	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
		abt_tag = tag;
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc)
			continue;

		cmd_rport = starget_to_rport(scsi_target(sc->device));
		if (rport != cmd_rport)
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
			sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (io_req->abts_done) {
			shost_printk(KERN_ERR, fnic->lport->host,
			"fnic_terminate_rport_io: io_req->abts_done is set "
			"state is %s\n",
			fnic_ioreq_state_to_str(CMD_STATE(sc)));
		}
		if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "fnic_terminate_rport_io "
				  "IO not yet issued %p tag 0x%x flags "
				  "%x state %d\n",
				  sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			abt_tag = (tag | FNIC_TAG_DEV_RST);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
		}

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "fnic_terminate_rport_io: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			else
				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
		}
	}
}

/*
 * This function is exported to SCSI for sending abort cmnds.
* A SCSI IO is represented by a io_req in the driver. * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO. */ int fnic_abort_cmd(struct scsi_cmnd *sc) { struct fc_lport *lp; struct fnic *fnic; struct fnic_io_req *io_req = NULL; struct fc_rport *rport; spinlock_t *io_lock; unsigned long flags; unsigned long start_time = 0; int ret = SUCCESS; u32 task_req = 0; struct scsi_lun fc_lun; int tag; DECLARE_COMPLETION_ONSTACK(tm_done); /* Wait for rport to unblock */ fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ lp = shost_priv(sc->device->host); fnic = lport_priv(lp); rport = starget_to_rport(scsi_target(sc->device)); tag = sc->request->tag; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n", rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc)); CMD_FLAGS(sc) = FNIC_NO_FLAGS; if (lp->state != LPORT_ST_READY || !(lp->link_up)) { ret = FAILED; goto fnic_abort_cmd_end; } /* * Avoid a race between SCSI issuing the abort and the device * completing the command. * * If the command is already completed by the fw cmpl code, * we just return SUCCESS from here. This means that the abort * succeeded. In the SCSI ML, since the timeout for command has * happened, the completion wont actually complete the command * and it will be considered as an aborted command * * The CMD_SP will not be cleared except while holding io_req_lock. */ io_lock = fnic_io_lock_hash(fnic, sc); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req) { spin_unlock_irqrestore(io_lock, flags); goto fnic_abort_cmd_end; } io_req->abts_done = &tm_done; if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { spin_unlock_irqrestore(io_lock, flags); goto wait_pending; } /* * Command is still pending, need to abort it * If the firmware completes the command after this point, * the completion wont be done till mid-layer, since abort * has already started. 
*/ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; spin_unlock_irqrestore(io_lock, flags); /* * Check readiness of the remote port. If the path to remote * port is up, then send abts to the remote port to terminate * the IO. Else, just locally terminate the IO in the firmware */ if (fc_remote_port_chkready(rport) == 0) task_req = FCPIO_ITMF_ABT_TASK; else task_req = FCPIO_ITMF_ABT_TASK_TERM; /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, fc_lun.scsi_lun, io_req)) { spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (io_req) io_req->abts_done = NULL; spin_unlock_irqrestore(io_lock, flags); ret = FAILED; goto fnic_abort_cmd_end; } if (task_req == FCPIO_ITMF_ABT_TASK) CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED; else CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED; /* * We queued an abort IO, wait for its completion. * Once the firmware completes the abort command, it will * wake up this thread. 
*/ wait_pending: wait_for_completion_timeout(&tm_done, msecs_to_jiffies (2 * fnic->config.ra_tov + fnic->config.ed_tov)); /* Check the abort status */ spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req) { spin_unlock_irqrestore(io_lock, flags); CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; ret = FAILED; goto fnic_abort_cmd_end; } io_req->abts_done = NULL; /* fw did not complete abort, timed out */ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { spin_unlock_irqrestore(io_lock, flags); CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; ret = FAILED; goto fnic_abort_cmd_end; } /* * firmware completed the abort, check the status, * free the io_req irrespective of failure or success */ if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS) ret = FAILED; CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); start_time = io_req->start_time; fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); fnic_abort_cmd_end: FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, sc->request->tag, sc, jiffies_to_msecs(jiffies - start_time), 0, ((u64)sc->cmnd[0] << 32 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Returning from abort cmd type %x %s\n", task_req, (ret == SUCCESS) ? 
"SUCCESS" : "FAILED");	/* tail of fnic_abort_cmd(): final debug print */
	return ret;
}

/*
 * fnic_queue_dr_io_req() - post a LUN RESET task-management request to
 * the firmware copy work queue for @sc.
 *
 * Blocks the request when the fnic is flagged FNIC_FLAGS_IO_BLOCKED;
 * otherwise bumps fnic->in_flight for the duration of the enqueue.
 *
 * Returns 0 on success.
 * NOTE(review): the failure returns mix conventions - FAILED (a SCSI
 * mid-layer code) when IO is blocked, but -EAGAIN when no descriptors
 * are available.  The caller only tests for non-zero, so behavior is
 * unaffected; confirm before unifying.
 */
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct Scsi_Host *host = fnic->lport->host;
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long intr_flags;

	/* Refuse new firmware requests while IO is administratively blocked */
	spin_lock_irqsave(host->host_lock, intr_flags);
	if (unlikely(fnic_chk_state_flags_locked(fnic,
						 FNIC_FLAGS_IO_BLOCKED))) {
		spin_unlock_irqrestore(host->host_lock, intr_flags);
		return FAILED;
	} else
		atomic_inc(&fnic->in_flight);
	spin_unlock_irqrestore(host->host_lock, intr_flags);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	/* Reclaim completed descriptors when running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "queue_dr_io_req failure - no descriptors\n");
		ret = -EAGAIN;
		goto lr_io_req_end;
	}

	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	/* FNIC_TAG_DEV_RST marks the tag as a device-reset request */
	fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	atomic_dec(&fnic->in_flight);

	return ret;
}

/*
 * Clean up any pending aborts on the lun
 * For each outstanding IO on this lun, whose abort is not completed by fw,
 * issue a local abort. Wait for abort to complete.
Return 0 if all commands
 * successfully aborted, 1 otherwise
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc)
{
	int tag, abt_tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct scsi_device *lun_dev = lr_sc->device;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	enum fnic_ioreq_state old_ioreq_state;

	/* Walk every possible tag on the host looking for IOs on this LUN */
	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd or cmds that do not belong to
		 * this lun
		 */
		if (!sc || sc == lr_sc || sc->device != lun_dev)
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		/* An abort is already outstanding for this IO - skip it */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/* Skip device-reset commands whose request never reached fw */
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			(!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				"%s dev rst not pending sc 0x%p\n", __func__,
				sc);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		/* Remember old state so it can be restored on enqueue failure */
		old_ioreq_state = CMD_STATE(sc);
		/*
		 * Any pending IO issued prior to reset is expected to be
		 * in abts pending state, if not we need to set
		 * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
		 * When IO is completed, the IO will be handed over and
		 * handled in this function.
		 */
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;

		if (io_req->abts_done)
			shost_printk(KERN_ERR, fnic->lport->host,
			  "%s: io_req->abts_done is set state is %s\n",
			  __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));

		BUG_ON(io_req->abts_done);

		abt_tag = tag;
		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			abt_tag |= FNIC_TAG_DEV_RST;
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				  "%s: dev rst sc 0x%p\n", __func__, sc);
		}

		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		io_req->abts_done = &tm_done;
		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, abt_tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/* Enqueue failed: undo our bookkeeping and bail */
			spin_lock_irqsave(io_lock, flags);
			io_req = (struct fnic_io_req *)CMD_SP(sc);
			if (io_req)
				io_req->abts_done = NULL;
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
			ret = 1;
			goto clean_pending_aborts_end;
		} else {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
			spin_unlock_irqrestore(io_lock, flags);
		}
		CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;

		/* Wait for the abts completion handler to wake us */
		wait_for_completion_timeout(&tm_done,
					    msecs_to_jiffies
					    (fnic->config.ed_tov));

		/* Recheck cmd state to check if it is now aborted */
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
			continue;
		}

		io_req->abts_done = NULL;

		/* if abort is still pending with fw, fail */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
			ret = 1;
			goto clean_pending_aborts_end;
		}
		CMD_SP(sc) = NULL;
		spin_unlock_irqrestore(io_lock, flags);

		/* Abort completed: release the IO's buffers and io_req */
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

	/*
	 * NOTE(review): schedule_timeout() is called here without a prior
	 * set_current_state(TASK_*); in TASK_RUNNING it returns immediately,
	 * so this is presumably intended as a delay but does not actually
	 * sleep - confirm (msleep() would sleep reliably).
	 */
	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));

	/* walk again to check, if IOs are still pending in fw */
	/*
	 * NOTE(review): failure is reported as FAILED here but as 1 on the
	 * paths above; both are non-zero and the caller only tests
	 * truthiness, so behavior is unaffected.
	 */
	if (fnic_is_abts_pending(fnic, lr_sc))
		ret = FAILED;

clean_pending_aborts_end:
	return ret;
}

/**
 * fnic_scsi_host_start_tag
 * Allocates tagid from host's tag list
 **/
static inline int fnic_scsi_host_start_tag(struct fnic *fnic,
					   struct scsi_cmnd *sc)
{
	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
	int tag, ret = SCSI_NO_TAG;

	BUG_ON(!bqt);
	/* Defensive fallback in case BUG_ON is compiled out (CONFIG_BUG=n) */
	if (!bqt) {
		pr_err("Tags are not supported\n");
		goto end;
	}

	do {
		/* search starts at bit 1 - presumably tag 0 is reserved;
		 * TODO confirm */
		tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
		if (tag >= bqt->max_depth) {
			pr_err("Tag allocation failure\n");
			goto end;
		}
	} while (test_and_set_bit(tag, bqt->tag_map));

	/* Mirror what the block layer does when it hands out a tag */
	bqt->tag_index[tag] = sc->request;
	sc->request->tag = tag;
	sc->tag = tag;
	if (!sc->request->special)
		sc->request->special = sc;

	ret = tag;

end:
	return ret;
}

/**
 * fnic_scsi_host_end_tag
 * frees tag allocated by fnic_scsi_host_start_tag.
 **/
static inline void fnic_scsi_host_end_tag(struct fnic *fnic,
					  struct scsi_cmnd *sc)
{
	struct blk_queue_tag *bqt = fnic->lport->host->bqt;
	int tag = sc->request->tag;

	if (tag == SCSI_NO_TAG)
		return;

	BUG_ON(!bqt || !bqt->tag_index[tag]);
	/* Defensive fallback in case BUG_ON is compiled out */
	if (!bqt)
		return;

	bqt->tag_index[tag] = NULL;
	clear_bit(tag, bqt->tag_map);

	return;
}

/*
 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
 * on the LUN.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req = NULL;
	struct fc_rport *rport;
	int status;
	int ret = FAILED;
	spinlock_t *io_lock;
	unsigned long flags;
	unsigned long start_time = 0;
	struct scsi_lun fc_lun;
	int tag = 0;
	DECLARE_COMPLETION_ONSTACK(tm_done);
	int tag_gen_flag = 0;   /*to track tags allocated by fnic driver*/

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);

	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
		      rport->port_id, sc->device->lun, sc);

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		goto fnic_device_reset_end;

	/* Check if remote port up */
	if (fc_remote_port_chkready(rport))
		goto fnic_device_reset_end;

	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
	/* Allocate tag if not present */

	tag = sc->request->tag;
	if (unlikely(tag < 0)) {
		tag = fnic_scsi_host_start_tag(fnic, sc);
		if (unlikely(tag == SCSI_NO_TAG))
			goto fnic_device_reset_end;
		tag_gen_flag = 1;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/*
	 * If there is a io_req attached to this command, then use it,
	 * else allocate a new one.
	 */
	if (!io_req) {
		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto fnic_device_reset_end;
		}
		memset(io_req, 0, sizeof(*io_req));
		io_req->port_id = rport->port_id;
		CMD_SP(sc) = (char *)io_req;
	}
	io_req->dr_done = &tm_done;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(io_lock, flags);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);

	/*
	 * issue the device reset, if enqueue failed, clean up the ioreq
	 * and break assoc with scsi cmd
	 */
	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}
	spin_lock_irqsave(io_lock, flags);
	CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Wait on the local completion for LUN reset.  The io_req may be
	 * freed while we wait since we hold no lock.
	 */
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"io_req is null tag 0x%x sc 0x%p\n", tag, sc);
		goto fnic_device_reset_end;
	}
	io_req->dr_done = NULL;

	status = CMD_LR_STATUS(sc);

	/*
	 * If lun reset not completed, bail out with failed. io_req
	 * gets cleaned up during higher levels of EH
	 */
	if (status == FCPIO_INVALID_CODE) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset timed out\n");
		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
		spin_unlock_irqrestore(io_lock, flags);
		int_to_scsilun(sc->device->lun, &fc_lun);
		/*
		 * Issue abort and terminate on the device reset request.
		 * If q'ing of the abort fails, retry issue it after a delay.
		 */
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
				spin_unlock_irqrestore(io_lock, flags);
				break;
			}
			spin_unlock_irqrestore(io_lock, flags);
			if (fnic_queue_abort_io_req(fnic,
				tag | FNIC_TAG_DEV_RST,
				FCPIO_ITMF_ABT_TASK_TERM,
				fc_lun.scsi_lun, io_req)) {
				/* Enqueue failed - back off and retry */
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
			} else {
				spin_lock_irqsave(io_lock, flags);
				CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
				CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
				io_req->abts_done = &tm_done;
				spin_unlock_irqrestore(io_lock, flags);
				FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"Abort and terminate issued on Device reset "
				"tag 0x%x sc 0x%p\n", tag, sc);
				break;
			}
		}
		/* Wait for either the terminate or the device-reset done */
		while (1) {
			spin_lock_irqsave(io_lock, flags);
			if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
				spin_unlock_irqrestore(io_lock, flags);
				wait_for_completion_timeout(&tm_done,
				msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
				break;
			} else {
				/*
				 * NOTE(review): io_req is re-read here and
				 * dereferenced without a NULL check; the
				 * completion path may have cleared CMD_SP -
				 * confirm this cannot race to NULL.
				 */
				io_req = (struct fnic_io_req *)CMD_SP(sc);
				io_req->abts_done = NULL;
				goto fnic_device_reset_clean;
			}
		}
	} else {
		spin_unlock_irqrestore(io_lock, flags);
	}

	/* Completed, but not successful, clean up the io_req, return fail */
	if (status != FCPIO_SUCCESS) {
		spin_lock_irqsave(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset completed - failed\n");
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		goto fnic_device_reset_clean;
	}

	/*
	 * Clean up any aborts on this lun that have still not
	 * completed. If any of these fail, then LUN reset fails.
	 * clean_pending_aborts cleans all cmds on this lun except
	 * the lun reset cmd. If all cmds get cleaned, the lun reset
	 * succeeds
	 */
	if (fnic_clean_pending_aborts(fnic, sc)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset failed"
			      " since could not abort all IOs\n");
		goto fnic_device_reset_clean;
	}

	/* Clean lun reset command */
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (io_req)
		/* Completed, and successful */
		ret = SUCCESS;

fnic_device_reset_clean:
	/* Reached with io_lock held on every path that jumps here */
	if (io_req)
		CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	if (io_req) {
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

fnic_device_reset_end:
	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
		  sc->request->tag, sc, jiffies_to_msecs(jiffies - start_time),
		  0, ((u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	/* free tag if it is allocated */
	if (unlikely(tag_gen_flag))
		fnic_scsi_host_end_tag(fnic, sc);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from device reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}

/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	int ret = SUCCESS;

	lp = shost_priv(shost);
	fnic = lport_priv(lp);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_reset called\n");

	/*
	 * Reset local port, this will clean up libFC exchanges,
	 * reset remote port sessions, and if link is up, begin flogi
	 */
	if (lp->tt.lport_reset(lp))
		ret = FAILED;

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from fnic reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");

	return ret;
}

/*
 * SCSI Error handling calls driver's eh_host_reset if all prior
 * error handling levels return FAILED.
If host reset completes
 * successfully, and if link is up, then Fabric login begins.
 *
 * Host Reset is the highest level of error recovery. If this fails, then
 * host is offlined by SCSI.
 *
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
	int ret;
	unsigned long wait_host_tmo;
	struct Scsi_Host *shost = sc->device->host;
	struct fc_lport *lp = shost_priv(shost);

	/*
	 * If fnic_reset is successful, wait for fabric login to complete
	 * scsi-ml tries to send a TUR to every device if host reset is
	 * successful, so before returning to scsi, fabric should be up
	 */
	ret = fnic_reset(shost);
	if (ret == SUCCESS) {
		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
		ret = FAILED;
		/* Poll once a second until the lport is ready or we time out */
		while (time_before(jiffies, wait_host_tmo)) {
			if ((lp->state == LPORT_ST_READY) &&
			    (lp->link_up)) {
				ret = SUCCESS;
				break;
			}
			ssleep(1);
		}
	}

	return ret;
}

/*
 * This fxn is called from libFC when host is removed
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
	int err = 0;
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);
	DECLARE_COMPLETION_ONSTACK(remove_wait);

	/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		/*
		 * NOTE(review): schedule_timeout() without set_current_state()
		 * returns immediately in TASK_RUNNING, making this a busy
		 * poll - presumably a 100ms delay was intended; confirm.
		 */
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}

	fnic->remove_wait = &remove_wait;
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	err = fnic_fw_reset_handler(fnic);
	if (err) {
		/* Reset issue failed: restore state and give up */
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		fnic->remove_wait = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	/* Wait for firmware reset to complete */
	wait_for_completion_timeout(&remove_wait,
				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->remove_wait = NULL;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_scsi_abort_io %s\n",
		      (fnic->state == FNIC_IN_ETH_MODE) ?
		      "SUCCESS" : "FAILED");
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

}

/*
 * This fxn called from libFC to clean up driver IO state on link down
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);

	/* issue fw reset */
retry_fw_reset:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
		/* fw reset is in progress, poll for its completion */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		/* NOTE(review): same busy-poll concern as in
		 * fnic_scsi_abort_io() - schedule_timeout() without
		 * set_current_state() does not sleep; confirm intent. */
		schedule_timeout(msecs_to_jiffies(100));
		goto retry_fw_reset;
	}
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic_fw_reset_handler(fnic)) {
		/* Reset issue failed: restore the previous state */
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

}

/* Intentionally empty libFC callback - nothing to clean up here */
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}

/*
 * libFC exchange-manager reset hook: resets driver IO state for a single
 * remote port (did != 0) or the whole host (sid == 0 && did == 0) before
 * delegating to fc_exch_mgr_reset().
 */
void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
	struct fnic *fnic = lport_priv(lp);

	/* Non-zero sid, nothing to do */
	if (sid)
		goto call_fc_exch_mgr_reset;

	if (did) {
		fnic_rport_exch_reset(fnic, did);
		goto call_fc_exch_mgr_reset;
	}

	/*
	 * sid = 0, did = 0
	 * link down or device being removed
	 */
	if (!fnic->in_remove)
		fnic_scsi_cleanup(lp);
	else
		fnic_scsi_abort_io(lp);

	/* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
	fc_exch_mgr_reset(lp, sid, did);

}

/*
 * fnic_is_abts_pending() is a helper function that
 * walks through tag map to check if there is any IOs pending,if there is one,
 * then it returns 1 (true), otherwise 0 (false)
 * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
 * otherwise,
it checks for all IOs. */
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
	int tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_device *lun_dev = NULL;

	if (lr_sc)
		lun_dev = lr_sc->device;

	/* walk again to check, if IOs are still pending in fw */
	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd or cmds that do not belong to
		 * this lun
		 */
		if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
			ret = 1;

		/*
		 * Fix: the previous code released io_lock only on the
		 * ABTS_PENDING branch and fell through to the next loop
		 * iteration (and out of the function) with the spinlock
		 * still held and interrupts disabled.  Release it
		 * unconditionally before moving to the next tag; the
		 * returned value is unchanged.
		 */
		spin_unlock_irqrestore(io_lock, flags);
	}

	return ret;
}
gpl-2.0
mythos234/zerolte-kernel-CM
drivers/pci/pci-stub.c
2392
2480
/* pci-stub - simple stub driver to reserve a pci device
 *
 * Copyright (C) 2008 Red Hat, Inc.
 * Author:
 * Chris Wright
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Usage is simple, allocate a new id to the stub driver and bind the
 * device to it. For example:
 *
 * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id
 * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
 * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind
 * # ls -l /sys/bus/pci/devices/0000:00:19.0/driver
 * .../0000:00:19.0/driver -> ../../../bus/pci/drivers/pci-stub
 */
#include <linux/module.h>
#include <linux/pci.h>

/* Comma-separated ID list supplied at module load; parsed once in init */
static char ids[1024] __initdata;

module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is "
		 "\"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\""
		 " and multiple comma separated entries can be specified");

/*
 * Probe does nothing but claim the device so no other driver binds it.
 */
static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	dev_info(&dev->dev, "claimed by stub\n");
	return 0;
}

static struct pci_driver stub_driver = {
	.name		= "pci-stub",
	.id_table	= NULL,	/* only dynamic id's */
	.probe		= pci_stub_probe,
};

/*
 * Register the driver, then parse the "ids" module parameter and add each
 * entry as a dynamic ID.  Malformed entries are warned about and skipped;
 * the module still loads so the remaining IDs stay claimed.
 */
static int __init pci_stub_init(void)
{
	char *p, *id;
	int rc;

	rc = pci_register_driver(&stub_driver);
	if (rc)
		return rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return 0;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class=0, class_mask=0;
		int fields;

		if (!strlen(id))
			continue;

		/* vendor and device are mandatory, the rest optional */
		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			printk(KERN_WARNING
			       "pci-stub: invalid id string \"%s\"\n", id);
			continue;
		}

		printk(KERN_INFO
		       "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
		       vendor, device, subvendor, subdevice, class, class_mask);

		rc = pci_add_dynid(&stub_driver, vendor, device, subvendor,
				   subdevice, class, class_mask, 0);
		if (rc)
			printk(KERN_WARNING
			       "pci-stub: failed to add dynamic id (%d)\n", rc);
	}

	return 0;
}

static void __exit pci_stub_exit(void)
{
	pci_unregister_driver(&stub_driver);
}

module_init(pci_stub_init);
module_exit(pci_stub_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>");
gpl-2.0
AttiJeong98/Elf-Kernel_M250S_JB
drivers/net/wireless/ath/ath5k/mac80211-ops.c
2392
22479
/*- * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * Copyright (c) 2004-2005 Atheros Communications, Inc. * Copyright (c) 2006 Devicescape Software, Inc. * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> * Copyright (c) 2010 Bruno Randolf <br1@einfach.org> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"

extern int ath5k_modparam_nohwcrypt;

/********************\
* Mac80211 functions *
\********************/

/*
 * mac80211 .tx callback: map the skb to its hardware TX queue, dropping
 * it when the queue index exceeds what the hardware supports.
 */
static void
ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ath5k_softc *sc = hw->priv;
	u16 qnum = skb_get_queue_mapping(skb);

	if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
		dev_kfree_skb_any(skb);
		return;
	}

	ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
}

/* mac80211 .start callback: bring the hardware up */
static int
ath5k_start(struct ieee80211_hw *hw)
{
	return ath5k_init_hw(hw->priv);
}

/* mac80211 .stop callback: shut the hardware down */
static void
ath5k_stop(struct ieee80211_hw *hw)
{
	ath5k_stop_hw(hw->priv);
}

/*
 * mac80211 .add_interface callback: validate the interface type against
 * current limits (one ad-hoc max, ATH_BCBUF beaconing vifs max), assign a
 * beacon slot for beaconing modes, and program the MAC address.
 */
static int
ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath5k_softc *sc = hw->priv;
	int ret;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	mutex_lock(&sc->lock);

	/* Beaconing interfaces are limited by the number of beacon buffers */
	if ((vif->type == NL80211_IFTYPE_AP ||
	     vif->type == NL80211_IFTYPE_ADHOC)
	    && (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
		ret = -ELNRNG;
		goto end;
	}

	/* Don't allow other interfaces if one ad-hoc is configured.
	 * TODO: Fix the problems with ad-hoc and multiple other interfaces.
	 * We would need to operate the HW in ad-hoc mode to allow TSF updates
	 * for the IBSS, but this breaks with additional AP or STA interfaces
	 * at the moment. */
	if (sc->num_adhoc_vifs ||
	    (sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
		ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
		ret = -ELNRNG;
		goto end;
	}

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
		avf->opmode = vif->type;
		break;
	default:
		ret = -EOPNOTSUPP;
		goto end;
	}

	sc->nvifs++;
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);

	/* Assign the vap/adhoc to a beacon xmit slot. */
	if ((avf->opmode == NL80211_IFTYPE_AP) ||
	    (avf->opmode == NL80211_IFTYPE_ADHOC) ||
	    (avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
		int slot;

		WARN_ON(list_empty(&sc->bcbuf));
		avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
					     list);
		list_del(&avf->bbuf->list);

		/* First free slot wins; slot 0 is the fallback */
		avf->bslot = 0;
		for (slot = 0; slot < ATH_BCBUF; slot++) {
			if (!sc->bslot[slot]) {
				avf->bslot = slot;
				break;
			}
		}
		BUG_ON(sc->bslot[avf->bslot] != NULL);
		sc->bslot[avf->bslot] = vif;
		if (avf->opmode == NL80211_IFTYPE_AP)
			sc->num_ap_vifs++;
		else if (avf->opmode == NL80211_IFTYPE_ADHOC)
			sc->num_adhoc_vifs++;
	}

	/* Any MAC address is fine, all others are included through the
	 * filter. */
	memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
	ath5k_hw_set_lladdr(sc->ah, vif->addr);

	memcpy(&avf->lladdr, vif->addr, ETH_ALEN);

	ath5k_update_bssid_mask_and_opmode(sc, vif);
	ret = 0;
end:
	mutex_unlock(&sc->lock);
	return ret;
}

/*
 * mac80211 .remove_interface callback: return the vif's beacon buffer and
 * slot, drop the per-mode vif counts and recompute the BSSID mask.
 */
static void
ath5k_remove_interface(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	unsigned int i;

	mutex_lock(&sc->lock);
	sc->nvifs--;

	if (avf->bbuf) {
		ath5k_txbuf_free_skb(sc, avf->bbuf);
		list_add_tail(&avf->bbuf->list, &sc->bcbuf);
		for (i = 0; i < ATH_BCBUF; i++) {
			if (sc->bslot[i] == vif) {
				sc->bslot[i] = NULL;
				break;
			}
		}
		avf->bbuf = NULL;
	}

	if (avf->opmode == NL80211_IFTYPE_AP)
		sc->num_ap_vifs--;
	else if (avf->opmode == NL80211_IFTYPE_ADHOC)
		sc->num_adhoc_vifs--;

	ath5k_update_bssid_mask_and_opmode(sc, NULL);
	mutex_unlock(&sc->lock);
}

/*
 * TODO: Phy disable/diversity etc
 */
static int
ath5k_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;
	int i;

	mutex_lock(&sc->lock);

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		ret = ath5k_chan_set(sc, conf->channel);
		if (ret < 0)
			goto unlock;
	}

	if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
	(sc->power_level != conf->power_level)) {
		sc->power_level = conf->power_level;

		/* Half dB steps */
		ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
	}

	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
		ah->ah_retry_long = conf->long_frame_max_tx_count;
		ah->ah_retry_short = conf->short_frame_max_tx_count;

		for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
			ath5k_hw_set_tx_retry_limits(ah, i);
	}

	/* TODO:
	 * 1) Move this on config_interface and handle each case
	 * separately eg. when we have only one STA vif, use
	 * AR5K_ANTMODE_SINGLE_AP
	 *
	 * 2) Allow the user to change antenna mode eg. when only
	 * one antenna is present
	 *
	 * 3) Allow the user to set default/tx antenna when possible
	 *
	 * 4) Default mode should handle 90% of the cases, together
	 * with fixed a/b and single AP modes we should be able to
	 * handle 99%. Sectored modes are extreme cases and i still
	 * haven't found a usage for them. If we decide to support them,
	 * then we must allow the user to set how many tx antennas we
	 * have available
	 */
	ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);

unlock:
	mutex_unlock(&sc->lock);
	return ret;
}

/*
 * mac80211 .bss_info_changed callback: push BSSID, beacon interval, slot
 * time, association state and beacon changes down to the hardware.
 */
static void
ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		       struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	unsigned long flags;

	mutex_lock(&sc->lock);

	if (changes & BSS_CHANGED_BSSID) {
		/* Cache for later use during resets */
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		common->curaid = 0;
		ath5k_hw_set_bssid(ah);
		mmiowb();
	}

	if (changes & BSS_CHANGED_BEACON_INT)
		sc->bintval = bss_conf->beacon_int;

	if (changes & BSS_CHANGED_ERP_SLOT) {
		int slot_time;

		ah->ah_short_slot = bss_conf->use_short_slot;
		slot_time = ath5k_hw_get_default_slottime(ah) +
			    3 * ah->ah_coverage_class;
		ath5k_hw_set_ifs_intervals(ah, slot_time);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		avf->assoc = bss_conf->assoc;
		/* Device-wide assoc state is true if any vif is associated */
		if (bss_conf->assoc)
			sc->assoc = bss_conf->assoc;
		else
			sc->assoc = ath_any_vif_assoc(sc);

		if (sc->opmode == NL80211_IFTYPE_STATION)
			set_beacon_filter(hw, sc->assoc);
		ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
			AR5K_LED_ASSOC : AR5K_LED_INIT);
		if (bss_conf->assoc) {
			ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
				  "Bss Info ASSOC %d, bssid: %pM\n",
				  bss_conf->aid, common->curbssid);
			common->curaid = bss_conf->aid;
			ath5k_hw_set_bssid(ah);
			/* Once ANI is available you would start it here */
		}
	}

	if (changes & BSS_CHANGED_BEACON) {
		spin_lock_irqsave(&sc->block, flags);
		ath5k_beacon_update(hw, vif);
		spin_unlock_irqrestore(&sc->block, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED)
		sc->enable_beacon = bss_conf->enable_beacon;

	if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
		       BSS_CHANGED_BEACON_INT))
		ath5k_beacon_config(sc);

	mutex_unlock(&sc->lock);
}

/*
 * mac80211 .prepare_multicast callback: hash each multicast address into a
 * 64-bit hardware filter bitmap (two 32-bit words packed into the return).
 */
static u64
ath5k_prepare_multicast(struct ieee80211_hw *hw,
			struct netdev_hw_addr_list *mc_list)
{
	u32 mfilt[2], val;
	u8 pos;
	struct netdev_hw_addr *ha;

	mfilt[0] = 0;
	/* NOTE(review): mfilt[1] starts at 1, not 0 - presumably sets bit 32
	 * of the filter unconditionally; confirm against hardware docs */
	mfilt[1] = 1;

	netdev_hw_addr_list_for_each(ha, mc_list) {
		/* calculate XOR of eight 6-bit values */
		val = get_unaligned_le32(ha->addr + 0);
		pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		val = get_unaligned_le32(ha->addr + 3);
		pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		pos &= 0x3f;
		mfilt[pos / 32] |= (1 << (pos % 32));
		/* XXX: we might be able to just do this instead,
		 * but not sure, needs testing, if we do use this we'd
		 * neet to inform below to not reset the mcast */
		/* ath5k_hw_set_mcast_filterindex(ah,
		 *      ha->addr[5]); */
	}

	return ((u64)(mfilt[1]) << 32) | mfilt[0];
}

/*
 * o always accept unicast, broadcast, and multicast traffic
 * o multicast traffic for all BSSIDs will be enabled if mac80211
 * says it should be
 * o maintain current state of phy ofdm or phy cck error reception.
 * If the hardware detects any of these type of errors then
 * ath5k_hw_get_rx_filter() will pass to us the respective
 * hardware filters to be able to receive these type of frames.
* o probe request frames are accepted only when operating in * hostap, adhoc, or monitor modes * o enable promiscuous mode according to the interface state * o accept beacons: * - when operating in adhoc mode so the 802.11 layer creates * node table entries for peers, * - when operating in station mode for collecting rssi data when * the station is otherwise quiet, or * - when scanning */ static void ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *new_flags, u64 multicast) { #define SUPPORTED_FIF_FLAGS \ (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \ FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \ FIF_BCN_PRBRESP_PROMISC) struct ath5k_softc *sc = hw->priv; struct ath5k_hw *ah = sc->ah; u32 mfilt[2], rfilt; struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */ mutex_lock(&sc->lock); mfilt[0] = multicast; mfilt[1] = multicast >> 32; /* Only deal with supported flags */ changed_flags &= SUPPORTED_FIF_FLAGS; *new_flags &= SUPPORTED_FIF_FLAGS; /* If HW detects any phy or radar errors, leave those filters on. 
* Also, always enable Unicast, Broadcasts and Multicast * XXX: move unicast, bssid broadcasts and multicast to mac80211 */ rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) | (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST | AR5K_RX_FILTER_MCAST); if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) { if (*new_flags & FIF_PROMISC_IN_BSS) __set_bit(ATH_STAT_PROMISC, sc->status); else __clear_bit(ATH_STAT_PROMISC, sc->status); } if (test_bit(ATH_STAT_PROMISC, sc->status)) rfilt |= AR5K_RX_FILTER_PROM; /* Note, AR5K_RX_FILTER_MCAST is already enabled */ if (*new_flags & FIF_ALLMULTI) { mfilt[0] = ~0; mfilt[1] = ~0; } /* This is the best we can do */ if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)) rfilt |= AR5K_RX_FILTER_PHYERR; /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons * and probes for any BSSID */ if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1)) rfilt |= AR5K_RX_FILTER_BEACON; /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not * set we should only pass on control frames for this * station. This needs testing. I believe right now this * enables *all* control frames, which is OK.. 
but * but we should see if we can improve on granularity */ if (*new_flags & FIF_CONTROL) rfilt |= AR5K_RX_FILTER_CONTROL; /* Additional settings per mode -- this is per ath5k */ /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */ switch (sc->opmode) { case NL80211_IFTYPE_MESH_POINT: rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: rfilt |= AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_BEACON; break; case NL80211_IFTYPE_STATION: if (sc->assoc) rfilt |= AR5K_RX_FILTER_BEACON; default: break; } iter_data.hw_macaddr = NULL; iter_data.n_stas = 0; iter_data.need_set_hw_addr = false; ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter, &iter_data); /* Set up RX Filter */ if (iter_data.n_stas > 1) { /* If you have multiple STA interfaces connected to * different APs, ARPs are not received (most of the time?) * Enabling PROMISC appears to fix that probem. 
	 */
		rfilt |= AR5K_RX_FILTER_PROM;
	}

	/* Set filters */
	ath5k_hw_set_rx_filter(ah, rfilt);

	/* Set multicast bits */
	ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
	/* Set the cached hw filter flags, this will later actually
	 * be set in HW */
	sc->filter_flags = rfilt;

	mutex_unlock(&sc->lock);
}

/*
 * mac80211 set_key callback: install or remove a hardware crypto key.
 *
 * Returns 0 on success, -EOPNOTSUPP when hardware crypto is disabled by
 * module parameter or the cipher is unsupported, -EINVAL for unknown
 * ciphers/commands, or the error from ath_key_config().
 */
static int
ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
	      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
	      struct ieee80211_key_conf *key)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret = 0;

	/* User asked for software crypto only (nohwcrypt=1) */
	if (ath5k_modparam_nohwcrypt)
		return -EOPNOTSUPP;

	/* Filter ciphers: WEP/TKIP always in hw; CCMP only when the
	 * hardware advertises AES-CCM capability. */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_TKIP:
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
			break;
		return -EOPNOTSUPP;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	mutex_lock(&sc->lock);

	switch (cmd) {
	case SET_KEY:
		/* ath_key_config() returns the hw key slot index (>= 0)
		 * on success, negative error otherwise. */
		ret = ath_key_config(common, vif, sta, key);
		if (ret >= 0) {
			key->hw_key_idx = ret;
			/* push IV and Michael MIC generation to stack */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
			if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
				key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
			ret = 0;
		}
		break;
	case DISABLE_KEY:
		ath_key_delete(common, key);
		break;
	default:
		ret = -EINVAL;
	}

	/* Flush posted MMIO writes before dropping the lock */
	mmiowb();
	mutex_unlock(&sc->lock);
	return ret;
}

/* mac80211 callback: software scan is starting; show it on the LED
 * unless we are associated (keep the assoc LED state then). */
static void ath5k_sw_scan_start(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	if (!sc->assoc)
		ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
}

/* mac80211 callback: software scan finished; restore the LED state
 * according to the current association status. */
static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
					      AR5K_LED_ASSOC : AR5K_LED_INIT);
}

/*
 * mac80211 get_stats callback: report low-level MIB counters.
 * Reads hardware MIB registers first so the cached counters are fresh.
 */
static int ath5k_get_stats(struct ieee80211_hw *hw,
			   struct ieee80211_low_level_stats *stats)
{
	struct ath5k_softc *sc = hw->priv;

	/* Force update */
	ath5k_hw_update_mib_counters(sc->ah);

	stats->dot11ACKFailureCount = sc->stats.ack_fail;
	stats->dot11RTSFailureCount = sc->stats.rts_fail;
	stats->dot11RTSSuccessCount = sc->stats.rts_ok;
	stats->dot11FCSErrorCount = sc->stats.fcs_error;

	return 0;
}

/*
 * mac80211 conf_tx callback: program WMM parameters (AIFS, CWmin/CWmax,
 * TXOP) into one hardware TX queue, then reset the queue so the new
 * values take effect. Out-of-range queues are silently accepted (0).
 */
static int ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
			 const struct ieee80211_tx_queue_params *params)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq_info qi;
	int ret = 0;

	if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
		return 0;

	mutex_lock(&sc->lock);

	/* Read-modify-write the queue properties */
	ath5k_hw_get_tx_queueprops(ah, queue, &qi);

	qi.tqi_aifs = params->aifs;
	qi.tqi_cw_min = params->cw_min;
	qi.tqi_cw_max = params->cw_max;
	qi.tqi_burst_time = params->txop;

	ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
		  "Configure tx [queue %d], "
		  "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
		  queue, params->aifs, params->cw_min,
		  params->cw_max, params->txop);

	if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
		ATH5K_ERR(sc,
			  "Unable to update hardware queue %u!\n", queue);
		ret = -EIO;
	} else
		ath5k_hw_reset_tx_queue(ah, queue);

	mutex_unlock(&sc->lock);

	return ret;
}

/* mac80211 get_tsf callback: read the 64-bit hardware TSF. */
static u64 ath5k_get_tsf(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;

	return ath5k_hw_get_tsf64(sc->ah);
}

/* mac80211 set_tsf callback: write the 64-bit hardware TSF. */
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
{
	struct ath5k_softc *sc = hw->priv;

	ath5k_hw_set_tsf64(sc->ah, tsf);
}

/* mac80211 reset_tsf callback: zero the hardware TSF. */
static void ath5k_reset_tsf(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;

	/*
	 * in IBSS mode we need to update the beacon timers too.
	 * this will also reset the TSF if we call it with 0
	 */
	if (sc->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_beacon_update_timers(sc, 0);
	else
		ath5k_hw_reset_tsf(sc->ah);
}

/*
 * mac80211 get_survey callback: report channel occupancy statistics.
 * Only index 0 (the current channel) is supported. The shared cycle
 * counters are sampled under cc_lock, accumulated into sc->survey in
 * milliseconds (cycles / (clockrate kHz * 1000)), and then reset.
 */
static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
			    struct survey_info *survey)
{
	struct ath5k_softc *sc = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct ath_cycle_counters *cc = &common->cc_survey;
	unsigned int div = common->clockrate * 1000;

	if (idx != 0)
		return -ENOENT;

	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	if (cc->cycles > 0) {
		sc->survey.channel_time += cc->cycles / div;
		sc->survey.channel_time_busy += cc->rx_busy / div;
		sc->survey.channel_time_rx += cc->rx_frame / div;
		sc->survey.channel_time_tx += cc->tx_frame / div;
	}
	/* Counters are cumulative in sc->survey; restart hw sampling */
	memset(cc, 0, sizeof(*cc));
	spin_unlock_bh(&common->cc_lock);

	memcpy(survey, &sc->survey, sizeof(*survey));

	survey->channel = conf->channel;
	survey->noise = sc->ah->ah_noise_floor;
	survey->filled = SURVEY_INFO_NOISE_DBM |
			SURVEY_INFO_CHANNEL_TIME |
			SURVEY_INFO_CHANNEL_TIME_BUSY |
			SURVEY_INFO_CHANNEL_TIME_RX |
			SURVEY_INFO_CHANNEL_TIME_TX;

	return 0;
}

/**
 * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
 *
 * @hw: struct ieee80211_hw pointer
 * @coverage_class: IEEE 802.11 coverage class number
 *
 * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
 * coverage class. The values are persistent, they are restored after device
 * reset.
*/ static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) { struct ath5k_softc *sc = hw->priv; mutex_lock(&sc->lock); ath5k_hw_set_coverage_class(sc->ah, coverage_class); mutex_unlock(&sc->lock); } static int ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) { struct ath5k_softc *sc = hw->priv; if (tx_ant == 1 && rx_ant == 1) ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A); else if (tx_ant == 2 && rx_ant == 2) ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B); else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3) ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT); else return -EINVAL; return 0; } static int ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { struct ath5k_softc *sc = hw->priv; switch (sc->ah->ah_ant_mode) { case AR5K_ANTMODE_FIXED_A: *tx_ant = 1; *rx_ant = 1; break; case AR5K_ANTMODE_FIXED_B: *tx_ant = 2; *rx_ant = 2; break; case AR5K_ANTMODE_DEFAULT: *tx_ant = 3; *rx_ant = 3; break; } return 0; } static void ath5k_get_ringparam(struct ieee80211_hw *hw, u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max) { struct ath5k_softc *sc = hw->priv; *tx = sc->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max; *tx_max = ATH5K_TXQ_LEN_MAX; *rx = *rx_max = ATH_RXBUF; } static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx) { struct ath5k_softc *sc = hw->priv; u16 qnum; /* only support setting tx ring size for now */ if (rx != ATH_RXBUF) return -EINVAL; /* restrict tx ring size min/max */ if (!tx || tx > ATH5K_TXQ_LEN_MAX) return -EINVAL; for (qnum = 0; qnum < ARRAY_SIZE(sc->txqs); qnum++) { if (!sc->txqs[qnum].setup) continue; if (sc->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN || sc->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX) continue; sc->txqs[qnum].txq_max = tx; if (sc->txqs[qnum].txq_len >= sc->txqs[qnum].txq_max) ieee80211_stop_queue(hw, sc->txqs[qnum].qnum); } return 0; } const struct ieee80211_ops ath5k_hw_ops = { .tx = ath5k_tx, .start = ath5k_start, .stop = ath5k_stop, 
.add_interface = ath5k_add_interface, /* .change_interface = not implemented */ .remove_interface = ath5k_remove_interface, .config = ath5k_config, .bss_info_changed = ath5k_bss_info_changed, .prepare_multicast = ath5k_prepare_multicast, .configure_filter = ath5k_configure_filter, /* .set_tim = not implemented */ .set_key = ath5k_set_key, /* .update_tkip_key = not implemented */ /* .hw_scan = not implemented */ .sw_scan_start = ath5k_sw_scan_start, .sw_scan_complete = ath5k_sw_scan_complete, .get_stats = ath5k_get_stats, /* .get_tkip_seq = not implemented */ /* .set_frag_threshold = not implemented */ /* .set_rts_threshold = not implemented */ /* .sta_add = not implemented */ /* .sta_remove = not implemented */ /* .sta_notify = not implemented */ .conf_tx = ath5k_conf_tx, .get_tsf = ath5k_get_tsf, .set_tsf = ath5k_set_tsf, .reset_tsf = ath5k_reset_tsf, /* .tx_last_beacon = not implemented */ /* .ampdu_action = not needed */ .get_survey = ath5k_get_survey, .set_coverage_class = ath5k_set_coverage_class, /* .rfkill_poll = not implemented */ /* .flush = not implemented */ /* .channel_switch = not implemented */ /* .napi_poll = not implemented */ .set_antenna = ath5k_set_antenna, .get_antenna = ath5k_get_antenna, .set_ringparam = ath5k_set_ringparam, .get_ringparam = ath5k_get_ringparam, };
gpl-2.0
goodhanrry/N915S_goodHanrry_kernel
fs/nfs/sysctl.c
2904
1267
/* * linux/fs/nfs/sysctl.c * * Sysctl interface to NFS parameters */ #include <linux/types.h> #include <linux/linkage.h> #include <linux/ctype.h> #include <linux/fs.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/nfs_fs.h> static struct ctl_table_header *nfs_callback_sysctl_table; static ctl_table nfs_cb_sysctls[] = { { .procname = "nfs_mountpoint_timeout", .data = &nfs_mountpoint_expiry_timeout, .maxlen = sizeof(nfs_mountpoint_expiry_timeout), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nfs_congestion_kb", .data = &nfs_congestion_kb, .maxlen = sizeof(nfs_congestion_kb), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; static ctl_table nfs_cb_sysctl_dir[] = { { .procname = "nfs", .mode = 0555, .child = nfs_cb_sysctls, }, { } }; static ctl_table nfs_cb_sysctl_root[] = { { .procname = "fs", .mode = 0555, .child = nfs_cb_sysctl_dir, }, { } }; int nfs_register_sysctl(void) { nfs_callback_sysctl_table = register_sysctl_table(nfs_cb_sysctl_root); if (nfs_callback_sysctl_table == NULL) return -ENOMEM; return 0; } void nfs_unregister_sysctl(void) { unregister_sysctl_table(nfs_callback_sysctl_table); nfs_callback_sysctl_table = NULL; }
gpl-2.0
draekko/android_kernel_ba2x_2.0
block/blk-integrity.c
2904
11710
/* * blk-integrity.c - Block layer data integrity extensions * * Copyright (C) 2007, 2008 Oracle Corporation * Written by: Martin K. Petersen <martin.petersen@oracle.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, * USA. * */ #include <linux/blkdev.h> #include <linux/mempool.h> #include <linux/bio.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include "blk.h" static struct kmem_cache *integrity_cachep; static const char *bi_unsupported_name = "unsupported"; /** * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements * @q: request queue * @bio: bio with integrity metadata attached * * Description: Returns the number of elements required in a * scatterlist corresponding to the integrity metadata in a bio. 
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec *iv, *ivprv = NULL;
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	unsigned int i = 0;

	bio_for_each_integrity_vec(iv, bio, i) {

		if (ivprv) {
			/* Merge this vec into the running segment only if
			 * it is physically contiguous, stays within the
			 * queue's segment boundary, and does not exceed
			 * the max segment size. */
			if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
				goto new_segment;

			if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
				goto new_segment;

			if (seg_size + iv->bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv->bv_len;
		} else {
new_segment:
			/* NOTE: jumped to from inside the if-branch above;
			 * starts a fresh scatterlist segment. */
			segments++;
			seg_size = iv->bv_len;
		}

		ivprv = iv;
	}

	return segments;
}
EXPORT_SYMBOL(blk_rq_count_integrity_sg);

/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @q: request queue
 * @bio: bio with integrity metadata attached
 * @sglist: target scatterlist
 *
 * Description: Map the integrity vectors in request into a
 * scatterlist. The scatterlist must be big enough to hold all
 * elements. I.e. sized using blk_rq_count_integrity_sg().
 */
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
			    struct scatterlist *sglist)
{
	struct bio_vec *iv, *ivprv = NULL;
	struct scatterlist *sg = NULL;
	unsigned int segments = 0;
	unsigned int i = 0;

	bio_for_each_integrity_vec(iv, bio, i) {

		if (ivprv) {
			/* Same merge rules as blk_rq_count_integrity_sg();
			 * the two functions must agree on segment counts. */
			if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv))
				goto new_segment;

			if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv))
				goto new_segment;

			if (sg->length + iv->bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv->bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/* Clear the previous entry's end marker
				 * before advancing to the next slot. */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
			segments++;
		}

		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);

/**
 * blk_integrity_compare - Compare integrity profile of two disks
 * @gd1: Disk to compare
 * @gd2: Disk to compare
 *
 * Description: Meta-devices like DM and MD need to verify that all
 * sub-devices use the same integrity format before advertising to
 * upper layers that they can send/receive integrity metadata.
This
 * function can be used to check whether two gendisk devices have
 * compatible integrity formats.
 */
int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
{
	struct blk_integrity *b1 = gd1->integrity;
	struct blk_integrity *b2 = gd2->integrity;

	/* Two disks without integrity profiles are trivially compatible */
	if (!b1 && !b2)
		return 0;

	/* Only one side having a profile is a mismatch */
	if (!b1 || !b2)
		return -1;

	if (b1->sector_size != b2->sector_size) {
		printk(KERN_ERR "%s: %s/%s sector sz %u != %u\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->sector_size, b2->sector_size);
		return -1;
	}

	if (b1->tuple_size != b2->tuple_size) {
		printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->tuple_size, b2->tuple_size);
		return -1;
	}

	/* Tag sizes are only compared when both sides use tags */
	if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
		printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->tag_size, b2->tag_size);
		return -1;
	}

	if (strcmp(b1->name, b2->name)) {
		printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->name, b2->name);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(blk_integrity_compare);

/* Check whether @next's integrity data can be merged into @req without
 * exceeding the queue's max_integrity_segments limit.
 * Returns 0 if the merge is allowed, -1 otherwise. */
int blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			   struct request *next)
{
	if (blk_integrity_rq(req) != blk_integrity_rq(next))
		return -1;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return -1;

	return 0;
}
EXPORT_SYMBOL(blk_integrity_merge_rq);

/* Check whether @bio's integrity segments fit into @req; on success the
 * request's integrity segment count is updated.
 * Returns 0 if the merge is allowed, -1 otherwise. */
int blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
	int nr_integrity_segs;
	struct bio *next = bio->bi_next;

	/* Temporarily unlink so only this bio's vecs are counted */
	bio->bi_next = NULL;
	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	bio->bi_next = next;

	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return -1;

	req->nr_integrity_segments += nr_integrity_segs;

	return 0;
}
EXPORT_SYMBOL(blk_integrity_merge_bio);

/* One sysfs attribute of a blk_integrity kobject, with its accessors. */
struct integrity_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_integrity *, char *);
	ssize_t (*store)(struct blk_integrity *, const char
*, size_t);
};

/* Generic sysfs show: dispatch to the entry's show() accessor. */
static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
				   char *page)
{
	struct blk_integrity *bi =
		container_of(kobj, struct blk_integrity, kobj);
	struct integrity_sysfs_entry *entry =
		container_of(attr, struct integrity_sysfs_entry, attr);

	return entry->show(bi, page);
}

/* Generic sysfs store: dispatch to the entry's store() accessor,
 * if one exists (read-only attributes have none). */
static ssize_t integrity_attr_store(struct kobject *kobj,
				    struct attribute *attr, const char *page,
				    size_t count)
{
	struct blk_integrity *bi =
		container_of(kobj, struct blk_integrity, kobj);
	struct integrity_sysfs_entry *entry =
		container_of(attr, struct integrity_sysfs_entry, attr);
	ssize_t ret = 0;

	if (entry->store)
		ret = entry->store(bi, page, count);

	return ret;
}

/* "format": the name of the integrity profile, or "none". */
static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
{
	if (bi != NULL && bi->name != NULL)
		return sprintf(page, "%s\n", bi->name);
	else
		return sprintf(page, "none\n");
}

/* "tag_size": bytes of application tag space per tuple, or 0. */
static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
{
	if (bi != NULL)
		return sprintf(page, "%u\n", bi->tag_size);
	else
		return sprintf(page, "0\n");
}

/* "read_verify" store: any non-zero value enables verification of
 * integrity metadata on READ.
 * NOTE(review): simple_strtoul() is the legacy parser; newer kernels
 * prefer kstrtoul() — left as-is to avoid an API dependency change. */
static ssize_t integrity_read_store(struct blk_integrity *bi,
				    const char *page, size_t count)
{
	char *p = (char *) page;
	unsigned long val = simple_strtoul(p, &p, 10);

	if (val)
		bi->flags |= INTEGRITY_FLAG_READ;
	else
		bi->flags &= ~INTEGRITY_FLAG_READ;

	return count;
}

/* "read_verify" show: 1 if READ verification is enabled, else 0. */
static ssize_t integrity_read_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_READ) != 0);
}

/* "write_generate" store: any non-zero value enables generation of
 * integrity metadata on WRITE. */
static ssize_t integrity_write_store(struct blk_integrity *bi,
				     const char *page, size_t count)
{
	char *p = (char *) page;
	unsigned long val = simple_strtoul(p, &p, 10);

	if (val)
		bi->flags |= INTEGRITY_FLAG_WRITE;
	else
		bi->flags &= ~INTEGRITY_FLAG_WRITE;

	return count;
}

/* "write_generate" show: 1 if WRITE generation is enabled, else 0. */
static ssize_t integrity_write_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_WRITE) != 0);
}

static struct integrity_sysfs_entry integrity_format_entry = {
	.attr = { .name = "format", .mode = S_IRUGO },
	.show =
integrity_format_show,
};

static struct integrity_sysfs_entry integrity_tag_size_entry = {
	.attr = { .name = "tag_size", .mode = S_IRUGO },
	.show = integrity_tag_size_show,
};

static struct integrity_sysfs_entry integrity_read_entry = {
	.attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR },
	.show = integrity_read_show,
	.store = integrity_read_store,
};

static struct integrity_sysfs_entry integrity_write_entry = {
	.attr = { .name = "write_generate", .mode = S_IRUGO | S_IWUSR },
	.show = integrity_write_show,
	.store = integrity_write_store,
};

/* Attributes exposed under /sys/block/<dev>/integrity/ */
static struct attribute *integrity_attrs[] = {
	&integrity_format_entry.attr,
	&integrity_tag_size_entry.attr,
	&integrity_read_entry.attr,
	&integrity_write_entry.attr,
	NULL,
};

static const struct sysfs_ops integrity_ops = {
	.show = &integrity_attr_show,
	.store = &integrity_attr_store,
};

/* Create the slab cache used for struct blk_integrity allocations.
 * SLAB_PANIC: the kernel cannot continue without this cache. */
static int __init blk_dev_integrity_init(void)
{
	integrity_cachep = kmem_cache_create("blkdev_integrity",
					     sizeof(struct blk_integrity), 0,
					     SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_dev_integrity_init);

/* kobject release: frees the blk_integrity when its refcount drops. */
static void blk_integrity_release(struct kobject *kobj)
{
	struct blk_integrity *bi =
		container_of(kobj, struct blk_integrity, kobj);

	kmem_cache_free(integrity_cachep, bi);
}

static struct kobj_type integrity_ktype = {
	.default_attrs = integrity_attrs,
	.sysfs_ops = &integrity_ops,
	.release = blk_integrity_release,
};

/* True when @disk has an integrity profile that has actually been
 * filled in (i.e. not the "unsupported" placeholder name). */
bool blk_integrity_is_initialized(struct gendisk *disk)
{
	struct blk_integrity *bi = blk_get_integrity(disk);

	return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
}
EXPORT_SYMBOL(blk_integrity_is_initialized);

/**
 * blk_integrity_register - Register a gendisk as being integrity-capable
 * @disk: struct gendisk pointer to make integrity-aware
 * @template: optional integrity profile to register
 *
 * Description: When a device needs to advertise itself as being able
 * to send/receive integrity metadata it must use this function to
 * register the capability with the block layer.
The template is a
 * blk_integrity struct with values appropriate for the underlying
 * hardware. If template is NULL the new profile is allocated but
 * not filled out. See Documentation/block/data-integrity.txt.
 */
int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
	struct blk_integrity *bi;

	BUG_ON(disk == NULL);

	if (disk->integrity == NULL) {
		/* First registration: allocate a zeroed profile and hook
		 * it into sysfs under the disk's kobject. */
		bi = kmem_cache_alloc(integrity_cachep,
				      GFP_KERNEL | __GFP_ZERO);
		if (!bi)
			return -1;

		if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
					 &disk_to_dev(disk)->kobj,
					 "%s", "integrity")) {
			kmem_cache_free(integrity_cachep, bi);
			return -1;
		}

		kobject_uevent(&bi->kobj, KOBJ_ADD);

		/* Verify on read and generate on write by default */
		bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE;
		bi->sector_size = queue_logical_block_size(disk->queue);
		disk->integrity = bi;
	} else
		bi = disk->integrity;	/* re-registration updates in place */

	/* Use the provided profile as template */
	if (template != NULL) {
		bi->name = template->name;
		bi->generate_fn = template->generate_fn;
		bi->verify_fn = template->verify_fn;
		bi->tuple_size = template->tuple_size;
		bi->set_tag_fn = template->set_tag_fn;
		bi->get_tag_fn = template->get_tag_fn;
		bi->tag_size = template->tag_size;
	} else
		bi->name = bi_unsupported_name;

	return 0;
}
EXPORT_SYMBOL(blk_integrity_register);

/**
 * blk_integrity_unregister - Remove block integrity profile
 * @disk: disk whose integrity profile to deallocate
 *
 * Description: This function frees all memory used by the block
 * integrity profile. To be called at device teardown.
 */
void blk_integrity_unregister(struct gendisk *disk)
{
	struct blk_integrity *bi;

	if (!disk || !disk->integrity)
		return;

	bi = disk->integrity;

	/* Remove from sysfs; the final kobject_put() triggers
	 * blk_integrity_release() which frees the profile. */
	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
	kobject_del(&bi->kobj);
	kobject_put(&bi->kobj);
	disk->integrity = NULL;
}
EXPORT_SYMBOL(blk_integrity_unregister);
gpl-2.0
kannu1994/sgs2_kernel
drivers/net/wireless/rtlwifi/rtl8192se/table.c
3160
13995
/****************************************************************************** * * Copyright(c) 2009-2010 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * * Created on 2010/ 5/18, 1:41 *****************************************************************************/ #include "table.h" u32 rtl8192sephy_reg_2t2rarray[PHY_REG_2T2RARRAYLENGTH] = { 0x01c, 0x07000000, 0x800, 0x00040000, 0x804, 0x00008003, 0x808, 0x0000fc00, 0x80c, 0x0000000a, 0x810, 0x10005088, 0x814, 0x020c3d10, 0x818, 0x00200185, 0x81c, 0x00000000, 0x820, 0x01000000, 0x824, 0x00390004, 0x828, 0x01000000, 0x82c, 0x00390004, 0x830, 0x00000004, 0x834, 0x00690200, 0x838, 0x00000004, 0x83c, 0x00690200, 0x840, 0x00010000, 0x844, 0x00010000, 0x848, 0x00000000, 0x84c, 0x00000000, 0x850, 0x00000000, 0x854, 0x00000000, 0x858, 0x48484848, 0x85c, 0x65a965a9, 0x860, 0x0f7f0130, 0x864, 0x0f7f0130, 0x868, 0x0f7f0130, 0x86c, 0x0f7f0130, 0x870, 0x03000700, 0x874, 0x03000300, 0x878, 0x00020002, 0x87c, 0x004f0201, 0x880, 0xa8300ac1, 0x884, 0x00000058, 0x888, 0x00000008, 0x88c, 0x00000004, 0x890, 0x00000000, 0x894, 0xfffffffe, 0x898, 0x40302010, 0x89c, 0x00706050, 0x8b0, 0x00000000, 0x8e0, 0x00000000, 0x8e4, 0x00000000, 0xe00, 0x30333333, 0xe04, 0x2a2d2e2f, 0xe08, 0x00003232, 0xe10, 0x30333333, 0xe14, 0x2a2d2e2f, 0xe18, 0x30333333, 0xe1c, 0x2a2d2e2f, 0xe30, 0x01007c00, 0xe34, 0x01004800, 0xe38, 0x1000dc1f, 0xe3c, 0x10008c1f, 0xe40, 0x021400a0, 0xe44, 0x281600a0, 0xe48, 0xf8000001, 0xe4c, 0x00002910, 0xe50, 0x01007c00, 0xe54, 0x01004800, 0xe58, 0x1000dc1f, 0xe5c, 0x10008c1f, 0xe60, 0x021400a0, 0xe64, 0x281600a0, 0xe6c, 0x00002910, 0xe70, 0x31ed92fb, 0xe74, 0x361536fb, 0xe78, 0x361536fb, 0xe7c, 0x361536fb, 0xe80, 0x361536fb, 0xe84, 0x000d92fb, 0xe88, 0x000d92fb, 0xe8c, 0x31ed92fb, 0xed0, 0x31ed92fb, 0xed4, 0x31ed92fb, 0xed8, 0x000d92fb, 0xedc, 0x000d92fb, 0xee0, 0x000d92fb, 0xee4, 0x015e5448, 0xee8, 0x21555448, 0x900, 0x00000000, 0x904, 0x00000023, 0x908, 0x00000000, 0x90c, 0x01121313, 0xa00, 0x00d047c8, 0xa04, 0x80ff0008, 0xa08, 0x8ccd8300, 0xa0c, 0x2e62120f, 0xa10, 0x9500bb78, 0xa14, 0x11144028, 0xa18, 0x00881117, 0xa1c, 
0x89140f00, 0xa20, 0x1a1b0000, 0xa24, 0x090e1317, 0xa28, 0x00000204, 0xa2c, 0x10d30000, 0xc00, 0x40071d40, 0xc04, 0x00a05633, 0xc08, 0x000000e4, 0xc0c, 0x6c6c6c6c, 0xc10, 0x08800000, 0xc14, 0x40000100, 0xc18, 0x08000000, 0xc1c, 0x40000100, 0xc20, 0x08000000, 0xc24, 0x40000100, 0xc28, 0x08000000, 0xc2c, 0x40000100, 0xc30, 0x6de9ac44, 0xc34, 0x469652cf, 0xc38, 0x49795994, 0xc3c, 0x0a979764, 0xc40, 0x1f7c403f, 0xc44, 0x000100b7, 0xc48, 0xec020000, 0xc4c, 0x007f037f, 0xc50, 0x69543420, 0xc54, 0x433c0094, 0xc58, 0x69543420, 0xc5c, 0x433c0094, 0xc60, 0x69543420, 0xc64, 0x433c0094, 0xc68, 0x69543420, 0xc6c, 0x433c0094, 0xc70, 0x2c7f000d, 0xc74, 0x0186155b, 0xc78, 0x0000001f, 0xc7c, 0x00b91612, 0xc80, 0x40000100, 0xc84, 0x20f60000, 0xc88, 0x20000080, 0xc8c, 0x20200000, 0xc90, 0x40000100, 0xc94, 0x00000000, 0xc98, 0x40000100, 0xc9c, 0x00000000, 0xca0, 0x00492492, 0xca4, 0x00000000, 0xca8, 0x00000000, 0xcac, 0x00000000, 0xcb0, 0x00000000, 0xcb4, 0x00000000, 0xcb8, 0x00000000, 0xcbc, 0x28000000, 0xcc0, 0x00000000, 0xcc4, 0x00000000, 0xcc8, 0x00000000, 0xccc, 0x00000000, 0xcd0, 0x00000000, 0xcd4, 0x00000000, 0xcd8, 0x64b22427, 0xcdc, 0x00766932, 0xce0, 0x00222222, 0xce4, 0x00000000, 0xce8, 0x37644302, 0xcec, 0x2f97d40c, 0xd00, 0x00000750, 0xd04, 0x00000403, 0xd08, 0x0000907f, 0xd0c, 0x00000001, 0xd10, 0xa0633333, 0xd14, 0x33333c63, 0xd18, 0x6a8f5b6b, 0xd1c, 0x00000000, 0xd20, 0x00000000, 0xd24, 0x00000000, 0xd28, 0x00000000, 0xd2c, 0xcc979975, 0xd30, 0x00000000, 0xd34, 0x00000000, 0xd38, 0x00000000, 0xd3c, 0x00027293, 0xd40, 0x00000000, 0xd44, 0x00000000, 0xd48, 0x00000000, 0xd50, 0x6437140a, 0xd54, 0x024dbd02, 0xd58, 0x00000000, 0xd5c, 0x30032064, 0xd60, 0x4653de68, 0xd64, 0x00518a3c, 0xd68, 0x00002101, 0xf14, 0x00000003, 0xf4c, 0x00000000, 0xf00, 0x00000300, }; u32 rtl8192sephy_changeto_1t1rarray[PHY_CHANGETO_1T1RARRAYLENGTH] = { 0x844, 0xffffffff, 0x00010000, 0x804, 0x0000000f, 0x00000001, 0x824, 0x00f0000f, 0x00300004, 0x82c, 0x00f0000f, 0x00100002, 0x870, 0x04000000, 
0x00000001, 0x864, 0x00000400, 0x00000000, 0x878, 0x000f000f, 0x00000002, 0xe74, 0x0f000000, 0x00000002, 0xe78, 0x0f000000, 0x00000002, 0xe7c, 0x0f000000, 0x00000002, 0xe80, 0x0f000000, 0x00000002, 0x90c, 0x000000ff, 0x00000011, 0xc04, 0x000000ff, 0x00000011, 0xd04, 0x0000000f, 0x00000001, 0x1f4, 0xffff0000, 0x00007777, 0x234, 0xf8000000, 0x0000000a, }; u32 rtl8192sephy_changeto_1t2rarray[PHY_CHANGETO_1T2RARRAYLENGTH] = { 0x804, 0x0000000f, 0x00000003, 0x824, 0x00f0000f, 0x00300004, 0x82c, 0x00f0000f, 0x00300002, 0x870, 0x04000000, 0x00000001, 0x864, 0x00000400, 0x00000000, 0x878, 0x000f000f, 0x00000002, 0xe74, 0x0f000000, 0x00000002, 0xe78, 0x0f000000, 0x00000002, 0xe7c, 0x0f000000, 0x00000002, 0xe80, 0x0f000000, 0x00000002, 0x90c, 0x000000ff, 0x00000011, 0xc04, 0x000000ff, 0x00000033, 0xd04, 0x0000000f, 0x00000003, 0x1f4, 0xffff0000, 0x00007777, 0x234, 0xf8000000, 0x0000000a, }; u32 rtl8192sephy_reg_array_pg[PHY_REG_ARRAY_PGLENGTH] = { 0xe00, 0xffffffff, 0x06090909, 0xe04, 0xffffffff, 0x00030406, 0xe08, 0x0000ff00, 0x00000000, 0xe10, 0xffffffff, 0x0a0c0d0e, 0xe14, 0xffffffff, 0x04070809, 0xe18, 0xffffffff, 0x0a0c0d0e, 0xe1c, 0xffffffff, 0x04070809, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0xe10, 0xffffffff, 0x02040404, 0xe14, 0xffffffff, 0x00000002, 0xe18, 0xffffffff, 0x02040404, 0xe1c, 0xffffffff, 0x00000002, 0xe00, 0xffffffff, 0x04040404, 0xe04, 0xffffffff, 0x00020204, 0xe08, 0x0000ff00, 0x00000000, 0xe10, 0xffffffff, 0x02040404, 0xe14, 0xffffffff, 0x00000002, 0xe18, 0xffffffff, 0x02040404, 0xe1c, 0xffffffff, 0x00000002, 0xe00, 0xffffffff, 0x02020202, 0xe04, 0xffffffff, 0x00020202, 0xe08, 0x0000ff00, 0x00000000, 0xe10, 0xffffffff, 0x02020202, 0xe14, 0xffffffff, 0x00000002, 0xe18, 0xffffffff, 0x02020202, 0xe1c, 0xffffffff, 0x00000002, }; u32 rtl8192seradioa_1t_array[RADIOA_1T_ARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00030250, 0x002, 0x00010000, 0x010, 0x0008000f, 0x011, 0x000231fc, 0x010, 0x000c000f, 0x011, 
0x0003f9f8, 0x010, 0x0002000f, 0x011, 0x00020101, 0x014, 0x0001093e, 0x014, 0x0009093e, 0x015, 0x0000f8f4, 0x017, 0x000f6500, 0x01a, 0x00013056, 0x01b, 0x00060000, 0x01c, 0x00000300, 0x01e, 0x00031059, 0x021, 0x00054000, 0x022, 0x0000083c, 0x023, 0x00001558, 0x024, 0x00000060, 0x025, 0x00022583, 0x026, 0x0000f200, 0x027, 0x000eacf1, 0x028, 0x0009bd54, 0x029, 0x00004582, 0x02a, 0x00000001, 0x02b, 0x00021334, 0x02a, 0x00000000, 0x02b, 0x0000000a, 0x02a, 0x00000001, 0x02b, 0x00000808, 0x02b, 0x00053333, 0x02c, 0x0000000c, 0x02a, 0x00000002, 0x02b, 0x00000808, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x00000003, 0x02b, 0x00000808, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000004, 0x02b, 0x00000808, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x00000005, 0x02b, 0x00000709, 0x02b, 0x00053333, 0x02c, 0x0000000d, 0x02a, 0x00000006, 0x02b, 0x00000709, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x00000007, 0x02b, 0x00000709, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x00000008, 0x02b, 0x00000709, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x00000009, 0x02b, 0x0000060a, 0x02b, 0x00053333, 0x02c, 0x0000000d, 0x02a, 0x0000000a, 0x02b, 0x0000060a, 0x02b, 0x0005b333, 0x02c, 0x0000000d, 0x02a, 0x0000000b, 0x02b, 0x0000060a, 0x02b, 0x00063333, 0x02c, 0x0000000d, 0x02a, 0x0000000c, 0x02b, 0x0000060a, 0x02b, 0x0006b333, 0x02c, 0x0000000d, 0x02a, 0x0000000d, 0x02b, 0x0000050b, 0x02b, 0x00053333, 0x02c, 0x0000000d, 0x02a, 0x0000000e, 0x02b, 0x0000050b, 0x02b, 0x00066623, 0x02c, 0x0000001a, 0x02a, 0x000e4000, 0x030, 0x00020000, 0x031, 0x000b9631, 0x032, 0x0000130d, 0x033, 0x00000187, 0x013, 0x00019e6c, 0x013, 0x00015e94, 0x000, 0x00010159, 0x018, 0x0000f401, 0x0fe, 0x00000000, 0x01e, 0x0003105b, 0x0fe, 0x00000000, 0x000, 0x00030159, 0x010, 0x0004000f, 0x011, 0x000203f9, }; u32 rtl8192seradiob_array[RADIOB_ARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00001041, 0x002, 0x00011000, 0x005, 0x00080fc0, 0x007, 0x000fc803, 0x013, 0x00017cb0, 0x013, 0x00011cc0, 0x013, 
0x0000dc60, 0x013, 0x00008c60, 0x013, 0x00004450, 0x013, 0x00000020, }; u32 rtl8192seradiob_gm_array[RADIOB_GM_ARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00001041, 0x002, 0x00011000, 0x005, 0x00080fc0, 0x007, 0x000fc803, }; u32 rtl8192semac_2t_array[MAC_2T_ARRAYLENGTH] = { 0x020, 0x00000035, 0x048, 0x0000000e, 0x049, 0x000000f0, 0x04a, 0x00000077, 0x04b, 0x00000083, 0x0b5, 0x00000021, 0x0dc, 0x000000ff, 0x0dd, 0x000000ff, 0x0de, 0x000000ff, 0x0df, 0x000000ff, 0x116, 0x00000000, 0x117, 0x00000000, 0x118, 0x00000000, 0x119, 0x00000000, 0x11a, 0x00000000, 0x11b, 0x00000000, 0x11c, 0x00000000, 0x11d, 0x00000000, 0x160, 0x0000000b, 0x161, 0x0000000b, 0x162, 0x0000000b, 0x163, 0x0000000b, 0x164, 0x0000000b, 0x165, 0x0000000b, 0x166, 0x0000000b, 0x167, 0x0000000b, 0x168, 0x0000000b, 0x169, 0x0000000b, 0x16a, 0x0000000b, 0x16b, 0x0000000b, 0x16c, 0x0000000b, 0x16d, 0x0000000b, 0x16e, 0x0000000b, 0x16f, 0x0000000b, 0x170, 0x0000000b, 0x171, 0x0000000b, 0x172, 0x0000000b, 0x173, 0x0000000b, 0x174, 0x0000000b, 0x175, 0x0000000b, 0x176, 0x0000000b, 0x177, 0x0000000b, 0x178, 0x0000000b, 0x179, 0x0000000b, 0x17a, 0x0000000b, 0x17b, 0x0000000b, 0x17c, 0x0000000b, 0x17d, 0x0000000b, 0x17e, 0x0000000b, 0x17f, 0x0000000b, 0x236, 0x0000000c, 0x503, 0x00000022, 0x560, 0x00000000, }; u32 rtl8192seagctab_array[AGCTAB_ARRAYLENGTH] = { 0xc78, 0x7f000001, 0xc78, 0x7f010001, 0xc78, 0x7e020001, 0xc78, 0x7d030001, 0xc78, 0x7c040001, 0xc78, 0x7b050001, 0xc78, 0x7a060001, 0xc78, 0x79070001, 0xc78, 0x78080001, 0xc78, 0x77090001, 0xc78, 0x760a0001, 0xc78, 0x750b0001, 0xc78, 0x740c0001, 0xc78, 0x730d0001, 0xc78, 0x720e0001, 0xc78, 0x710f0001, 0xc78, 0x70100001, 0xc78, 0x6f110001, 0xc78, 0x6f120001, 0xc78, 0x6e130001, 0xc78, 0x6d140001, 0xc78, 0x6d150001, 0xc78, 0x6c160001, 0xc78, 0x6b170001, 0xc78, 0x6a180001, 0xc78, 0x6a190001, 0xc78, 0x691a0001, 0xc78, 0x681b0001, 0xc78, 0x671c0001, 0xc78, 0x661d0001, 0xc78, 0x651e0001, 0xc78, 0x641f0001, 0xc78, 0x63200001, 0xc78, 0x4c210001, 0xc78, 
0x4b220001, 0xc78, 0x4a230001, 0xc78, 0x49240001, 0xc78, 0x48250001, 0xc78, 0x47260001, 0xc78, 0x46270001, 0xc78, 0x45280001, 0xc78, 0x44290001, 0xc78, 0x2c2a0001, 0xc78, 0x2b2b0001, 0xc78, 0x2a2c0001, 0xc78, 0x292d0001, 0xc78, 0x282e0001, 0xc78, 0x272f0001, 0xc78, 0x26300001, 0xc78, 0x25310001, 0xc78, 0x24320001, 0xc78, 0x23330001, 0xc78, 0x22340001, 0xc78, 0x09350001, 0xc78, 0x08360001, 0xc78, 0x07370001, 0xc78, 0x06380001, 0xc78, 0x05390001, 0xc78, 0x043a0001, 0xc78, 0x033b0001, 0xc78, 0x023c0001, 0xc78, 0x013d0001, 0xc78, 0x003e0001, 0xc78, 0x003f0001, 0xc78, 0x7f400001, 0xc78, 0x7f410001, 0xc78, 0x7e420001, 0xc78, 0x7d430001, 0xc78, 0x7c440001, 0xc78, 0x7b450001, 0xc78, 0x7a460001, 0xc78, 0x79470001, 0xc78, 0x78480001, 0xc78, 0x77490001, 0xc78, 0x764a0001, 0xc78, 0x754b0001, 0xc78, 0x744c0001, 0xc78, 0x734d0001, 0xc78, 0x724e0001, 0xc78, 0x714f0001, 0xc78, 0x70500001, 0xc78, 0x6f510001, 0xc78, 0x6f520001, 0xc78, 0x6e530001, 0xc78, 0x6d540001, 0xc78, 0x6d550001, 0xc78, 0x6c560001, 0xc78, 0x6b570001, 0xc78, 0x6a580001, 0xc78, 0x6a590001, 0xc78, 0x695a0001, 0xc78, 0x685b0001, 0xc78, 0x675c0001, 0xc78, 0x665d0001, 0xc78, 0x655e0001, 0xc78, 0x645f0001, 0xc78, 0x63600001, 0xc78, 0x4c610001, 0xc78, 0x4b620001, 0xc78, 0x4a630001, 0xc78, 0x49640001, 0xc78, 0x48650001, 0xc78, 0x47660001, 0xc78, 0x46670001, 0xc78, 0x45680001, 0xc78, 0x44690001, 0xc78, 0x2c6a0001, 0xc78, 0x2b6b0001, 0xc78, 0x2a6c0001, 0xc78, 0x296d0001, 0xc78, 0x286e0001, 0xc78, 0x276f0001, 0xc78, 0x26700001, 0xc78, 0x25710001, 0xc78, 0x24720001, 0xc78, 0x23730001, 0xc78, 0x22740001, 0xc78, 0x09750001, 0xc78, 0x08760001, 0xc78, 0x07770001, 0xc78, 0x06780001, 0xc78, 0x05790001, 0xc78, 0x047a0001, 0xc78, 0x037b0001, 0xc78, 0x027c0001, 0xc78, 0x017d0001, 0xc78, 0x007e0001, 0xc78, 0x007f0001, 0xc78, 0x3000001e, 0xc78, 0x3001001e, 0xc78, 0x3002001e, 0xc78, 0x3003001e, 0xc78, 0x3004001e, 0xc78, 0x3405001e, 0xc78, 0x3806001e, 0xc78, 0x3e07001e, 0xc78, 0x3e08001e, 0xc78, 0x4409001e, 0xc78, 0x460a001e, 0xc78, 
0x480b001e, 0xc78, 0x480c001e, 0xc78, 0x4e0d001e, 0xc78, 0x560e001e, 0xc78, 0x5a0f001e, 0xc78, 0x5e10001e, 0xc78, 0x6211001e, 0xc78, 0x6c12001e, 0xc78, 0x7213001e, 0xc78, 0x7214001e, 0xc78, 0x7215001e, 0xc78, 0x7216001e, 0xc78, 0x7217001e, 0xc78, 0x7218001e, 0xc78, 0x7219001e, 0xc78, 0x721a001e, 0xc78, 0x721b001e, 0xc78, 0x721c001e, 0xc78, 0x721d001e, 0xc78, 0x721e001e, 0xc78, 0x721f001e, };
gpl-2.0
pershoot/android_kernel_samsung_p4
arch/x86/lib/inat.c
4184
2513
/*
 * x86 instruction attribute tables
 *
 * Written by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <asm/insn.h>

/* Attribute tables are generated from opcode map */
#include "inat-tables.c"

/* Attribute search APIs */

/* Return the attribute of a one-byte (primary map) opcode. */
insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
{
	return inat_primary_table[opcode];
}

/*
 * Return the attribute of an escaped (multi-byte) opcode.
 * @last_pfx selects a prefix-variant table when the base entry has one;
 * 0 means "no last prefix".
 */
insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, insn_byte_t last_pfx,
				      insn_attr_t esc_attr)
{
	const insn_attr_t *attrs;
	int eid, lpfx_id = 0;

	eid = inat_escape_id(esc_attr);
	if (last_pfx)
		lpfx_id = inat_last_prefix_id(
				inat_get_opcode_attribute(last_pfx));

	attrs = inat_escape_tables[eid][0];
	if (!attrs)
		return 0;
	/* Switch to the prefix-specific variant table when one exists */
	if (lpfx_id && inat_has_variant(attrs[opcode])) {
		attrs = inat_escape_tables[eid][lpfx_id];
		if (!attrs)
			return 0;
	}
	return attrs[opcode];
}

/*
 * Return the attribute of a ModRM "group" opcode, selected by the
 * reg field of @modrm.  Falls back to the group's common attribute
 * when no per-entry table exists.
 */
insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx,
				     insn_attr_t grp_attr)
{
	const insn_attr_t *attrs;
	int gid, lpfx_id = 0;

	gid = inat_group_id(grp_attr);
	if (last_pfx)
		lpfx_id = inat_last_prefix_id(
				inat_get_opcode_attribute(last_pfx));

	attrs = inat_group_tables[gid][0];
	if (!attrs)
		return inat_group_common_attribute(grp_attr);
	/* Prefer the prefix-variant entry when present */
	if (lpfx_id && inat_has_variant(attrs[X86_MODRM_REG(modrm)])) {
		attrs = inat_group_tables[gid][lpfx_id];
		if (!attrs)
			return inat_group_common_attribute(grp_attr);
	}
	return attrs[X86_MODRM_REG(modrm)] |
	       inat_group_common_attribute(grp_attr);
}

/*
 * Return the attribute of an AVX (VEX-encoded) opcode, indexed by the
 * VEX.mmmmm and VEX.pp fields.  Out-of-range selectors yield 0.
 */
insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
				   insn_byte_t vex_p)
{
	const insn_attr_t *attrs;

	if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
		return 0;
	attrs = inat_avx_tables[vex_m][vex_p];
	return attrs ? attrs[opcode] : 0;
}
gpl-2.0
pacerom/kernel_google_msm
arch/microblaze/mm/init.c
4696
13456
/*
 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h> /* mem_init */
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/export.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Use for MMU and noMMU because of PCI generic code */
int mem_init_done;

#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);

#else
/* Set once bootmem is usable; gates early_get_page() allocation path */
static int init_bootmem_done;
#endif /* CONFIG_MMU */

/* First free address past the kernel image (set up by head.S / linker) */
char *klimit = _end;

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.
 */
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

/* Walk the kernel page tables to the PTE mapping @vaddr */
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), vaddr),
			vaddr);
}

/* Set up the pkmap page table and the kmap PTE/protection used by highmem */
static void __init highmem_init(void)
{
	pr_debug("%x\n", (u32)PKMAP_BASE);
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
}

/*
 * Hand every non-reserved high-memory page to the page allocator.
 * Returns the number of pages released (used to adjust the reserved count).
 */
static unsigned long highmem_setup(void)
{
	unsigned long pfn;
	unsigned long reservedpages = 0;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* FIXME not sure about */
		if (memblock_is_reserved(pfn << PAGE_SHIFT))
			continue;
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
		reservedpages++;
	}
	totalram_pages += totalhigh_pages;
	printk(KERN_INFO "High memory: %luk\n",
					totalhigh_pages << (PAGE_SHIFT-10));

	return reservedpages;
}
#endif /* CONFIG_HIGHMEM */

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
static void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_MMU
	int idx;

	/* Setup fixmaps */
	for (idx = 0; idx < __end_of_fixed_addresses; idx++)
		clear_fixmap(idx);
#endif

	/* Clean every zones */
	memset(zones_size, 0, sizeof(zones_size));

#ifdef CONFIG_HIGHMEM
	highmem_init();

	zones_size[ZONE_DMA] = max_low_pfn;
	zones_size[ZONE_HIGHMEM] = max_pfn;
#else
	zones_size[ZONE_DMA] = max_pfn;
#endif

	/* We don't have holes in memory map */
	free_area_init_nodes(zones_size);
}

/*
 * Discover RAM, reserve the kernel image, hand the rest to bootmem,
 * and finish with paging_init().  On noMMU this also locates the
 * memory bank containing the kernel and sets PAGE_OFFSET.
 */
void __init setup_memory(void)
{
	unsigned long map_size;
	struct memblock_region *reg;

#ifndef CONFIG_MMU
	u32 kernel_align_start, kernel_align_size;

	/* Find main memory where is the kernel */
	for_each_memblock(memory, reg) {
		memory_start = (u32)reg->base;
		lowmem_size = reg->size;
		if ((memory_start <= (u32)_text) &&
			((u32)_text <= (memory_start + lowmem_size - 1))) {
			memory_size = lowmem_size;
			PAGE_OFFSET = memory_start;
			printk(KERN_INFO "%s: Main mem: 0x%x, "
				"size 0x%08x\n", __func__, (u32) memory_start,
					(u32) memory_size);
			break;
		}
	}

	if (!memory_start || !memory_size) {
		panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
			__func__, (u32) memory_start, (u32) memory_size);
	}

	/* reservation of region where is the kernel */
	kernel_align_start = PAGE_DOWN((u32)_text);
	/* ALIGN can be remove because _end in vmlinux.lds.S is align */
	kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
	printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
		__func__, kernel_align_start, kernel_align_start
			+ kernel_align_size, kernel_align_size);
	memblock_reserve(kernel_align_start, kernel_align_size);
#endif
	/*
	 * Kernel:
	 * start: base phys address of kernel - page align
	 * end: base phys address of kernel - page align
	 *
	 * min_low_pfn - the first page (mm/bootmem.c - node_boot_start)
	 * max_low_pfn
	 * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn)
	 * num_physpages - number of all pages
	 */

	/* memory start is from the kernel end (aligned) to higher addr */
	min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
	/* RAM is assumed contiguous */
	num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
	max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
	max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;

	printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
	printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
	printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
	printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn);

	/*
	 * Find an area to use for the bootmem bitmap.
	 * We look for the first area which is at least
	 * 128kB in length (128kB is enough for a bitmap
	 * for 4GB of memory, using 4kB pages), plus 1 page
	 * (in case the address isn't page-aligned).
	 */
	map_size = init_bootmem_node(NODE_DATA(0),
		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);

	/* Add active regions with valid PFNs */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;

		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(start_pfn << PAGE_SHIFT,
					(end_pfn - start_pfn) << PAGE_SHIFT, 0);
	}

	/* free bootmem is whole main memory */
	free_bootmem_with_active_regions(0, max_low_pfn);

	/* reserve allocate blocks */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;

		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
			 (u32) reg->base, (u32) reg->size, top,
						memory_start + lowmem_size - 1);

		/* Clip reservations that extend beyond lowmem */
		if (top <= (memory_start + lowmem_size - 1)) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		} else if (reg->base < (memory_start + lowmem_size - 1)) {
			unsigned long trunc_size = memory_start +
						lowmem_size - reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_MMU
	init_bootmem_done = 1;
#endif
	paging_init();
}

/* Release pages in [begin, end) back to the allocator; logs what was freed */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
/* Free the pages occupied by the initrd image once it is unpacked */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages = 0;
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
		pages++;
	}
	printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
					(int)(pages * (PAGE_SIZE / 1024)));
}
#endif

/* Free the __init text/data sections after boot completes */
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

/*
 * Hand all bootmem to the buddy allocator, count reserved pages,
 * print the memory summary, and mark mem_init_done.
 */
void __init mem_init(void)
{
	pg_data_t *pgdat;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	high_memory = (void *)__va(memory_start + lowmem_size - 1);

	/* this will put all memory onto the freelists */
	totalram_pages += free_all_bootmem();

	for_each_online_pgdat(pgdat) {
		unsigned long i;
		struct page *page;

		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

#ifdef CONFIG_HIGHMEM
	reservedpages -= highmem_setup();
#endif

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

	pr_info("Memory: %luk/%luk available (%luk kernel code, "
		"%luk reserved, %luk data, %luk bss, %luk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

#ifdef CONFIG_MMU
	pr_info("Kernel virtual memory layout:\n");
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
		ioremap_bot, ioremap_base);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		(unsigned long)VMALLOC_START, VMALLOC_END);
#endif
	mem_init_done = 1;
}

#ifndef CONFIG_MMU
int page_is_ram(unsigned long pfn)
{
	return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

/*
 * Check for command-line options that affect what MMU_init will do.
 */
static void mm_cmdline_setup(void)
{
	unsigned long maxmem = 0;
	char *p = cmd_line;

	/* Look for mem= option on command line */
	p = strstr(cmd_line, "mem=");
	if (p) {
		p += 4;
		maxmem = memparse(p, &p);
		if (maxmem && memory_size > maxmem) {
			memory_size = maxmem;
			memblock.memory.regions[0].size = memory_size;
		}
	}
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
static void __init mmu_init_hw(void)
{
	/*
	 * The Zone Protection Register (ZPR) defines how protection will
	 * be applied to every page which is a member of a given zone. At
	 * present, we utilize only two of the zones.
	 * The zone index bits (of ZSEL) in the PTE are used for software
	 * indicators, except the LSB. For user access, zone 1 is used,
	 * for kernel access, zone 0 is used. We set all but zone 1
	 * to zero, allowing only kernel access as indicated in the PTE.
	 * For zone 1, we set a 01 binary (a value of 10 will not work)
	 * to allow user access as indicated in the PTE. This also allows
	 * kernel access as indicated in the PTE.
	 */
	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
			"mts rzpr, r11;"
			: : : "r11");
}

/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */

/* called from head.S */
asmlinkage void __init mmu_init(void)
{
	unsigned int kstart, ksize;

	if (!memblock.reserved.cnt) {
		printk(KERN_EMERG "Error memory count\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < 0x400000) {
		printk(KERN_EMERG "Memory must be greater than 4MB\n");
		machine_restart(NULL);
	}

	if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
		printk(KERN_EMERG "Kernel size is greater than memory node\n");
		machine_restart(NULL);
	}

	/* Find main memory where the kernel is */
	memory_start = (u32) memblock.memory.regions[0].base;
	lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;

	if (lowmem_size > CONFIG_LOWMEM_SIZE) {
		lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
		memory_size = lowmem_size;
#endif
	}

	mm_cmdline_setup(); /* FIXME parse args from command line - not used */

	/*
	 * Map out the kernel text/data/bss from the available physical
	 * memory.
	 */
	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
	/* kernel size */
	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
	memblock_reserve(kstart, ksize);

#if defined(CONFIG_BLK_DEV_INITRD)
	/* Remove the init RAM disk from the available memory. */
/*	if (initrd_start) {
		mem_pieces_remove(&phys_avail, __pa(initrd_start),
				  initrd_end - initrd_start, 1);
	}*/
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Initialize the MMU hardware */
	mmu_init_hw();

	/* Map in all of RAM starting at CONFIG_KERNEL_START */
	mapin_ram();

	/* Extend vmalloc and ioremap area as big as possible */
#ifdef CONFIG_HIGHMEM
	ioremap_base = ioremap_bot = PKMAP_BASE;
#else
	ioremap_base = ioremap_bot = FIXADDR_START;
#endif

	/* Initialize the context management stuff */
	mmu_context_init();

	/* Shortly after that, the entire linear mapping will be available */
	/* This will also cause that unflatten device tree will be allocated
	 * inside 768MB limit */
	memblock_set_current_limit(memory_start + lowmem_size - 1);
}

/* This is only called until mem_init is done. */
void __init *early_get_page(void)
{
	void *p;
	if (init_bootmem_done) {
		p = alloc_bootmem_pages(PAGE_SIZE);
	} else {
		/*
		 * Mem start + kernel_tlb -> here is limit
		 * because of mem mapping from head.S
		 */
		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
					memory_start + kernel_tlb));
	}
	return p;
}

#endif /* CONFIG_MMU */

/* Allocate via kmalloc once mem_init is done, else fall back to bootmem */
void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
{
	if (mem_init_done)
		return kmalloc(size, mask);
	else
		return alloc_bootmem(size);
}

/* Zeroed variant of alloc_maybe_bootmem() */
void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done)
		p = kzalloc(size, mask);
	else {
		p = alloc_bootmem(size);
		if (p)
			memset(p, 0, size);
	}

	return p;
}
gpl-2.0
robcore/machinex_kernelv2
drivers/usb/host/ehci-xls.c
4952
3774
/*
 * EHCI HCD for Netlogic XLS processors.
 *
 * (C) Copyright 2011 Netlogic Microsystems Inc.
 *
 *  Based on various ehci-*.c drivers
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <linux/platform_device.h>

/*
 * hc_driver .reset hook: locate the capability/operational register
 * windows, cache hcs_params, halt the controller and initialize the
 * EHCI data structures before resetting it.
 */
static int ehci_xls_setup(struct usb_hcd *hcd)
{
	int	retval;
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);

	ehci->caps = hcd->regs;
	/* Operational registers follow the capability block */
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	ehci_reset(ehci);

	return retval;
}

/*
 * Common probe path: fetch the IRQ and MMIO resources from the platform
 * device, create/map the HCD and register it with the USB core.
 * Unwinds partially-acquired resources via the goto error ladder.
 */
int ehci_xls_probe_internal(const struct hc_driver *driver,
	struct platform_device *pdev)
{
	struct usb_hcd  *hcd;
	struct resource *res;
	int retval, irq;

	/* Get our IRQ from an earlier registered Platform Resource */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Found HC with no IRQ. "
			"Check %s setup!\n", dev_name(&pdev->dev));
		return -ENODEV;
	}

	/* Get our Memory Handle */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Error: MMIO Handle %s setup!\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err1;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		retval = -EBUSY;
		goto err2;
	}
	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);

	if (hcd->regs == NULL) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		retval = -EFAULT;
		goto err3;
	}

	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (retval != 0)
		goto err4;
	return retval;

err4:
	iounmap(hcd->regs);
err3:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err2:
	usb_put_hcd(hcd);
err1:
	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev),
		retval);
	return retval;
}

/* hc_driver ops; ehci_* callbacks come from ehci-hcd.c (this file is
 * built as part of it) */
static struct hc_driver ehci_xls_hc_driver = {
	.description	= hcd_name,
	.product_desc	= "XLS EHCI Host Controller",
	.hcd_priv_size	= sizeof(struct ehci_hcd),
	.irq		= ehci_irq,
	.flags		= HCD_USB2 | HCD_MEMORY,
	.reset		= ehci_xls_setup,
	.start		= ehci_run,
	.stop		= ehci_stop,
	.shutdown	= ehci_shutdown,

	.urb_enqueue	= ehci_urb_enqueue,
	.urb_dequeue	= ehci_urb_dequeue,
	.endpoint_disable = ehci_endpoint_disable,
	.endpoint_reset	= ehci_endpoint_reset,

	.get_frame_number = ehci_get_frame,

	.hub_status_data = ehci_hub_status_data,
	.hub_control	= ehci_hub_control,
	.bus_suspend	= ehci_bus_suspend,
	.bus_resume	= ehci_bus_resume,
	.relinquish_port = ehci_relinquish_port,
	.port_handed_over = ehci_port_handed_over,

	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};

/* Platform-bus probe entry point */
static int ehci_xls_probe(struct platform_device *pdev)
{
	if (usb_disabled())
		return -ENODEV;

	return ehci_xls_probe_internal(&ehci_xls_hc_driver, pdev);
}

/* Platform-bus remove: tear down in reverse order of probe */
static int ehci_xls_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
	return 0;
}

MODULE_ALIAS("ehci-xls");

static struct platform_driver ehci_xls_driver = {
	.probe		= ehci_xls_probe,
	.remove		= ehci_xls_remove,
	.shutdown	= usb_hcd_platform_shutdown,
	.driver		= {
		.name = "ehci-xls",
	},
};
gpl-2.0
AMD-Grifon/android_kernel_samsung_aries
fs/ufs/dir.c
8536
16595
/*
 *  linux/fs/ufs/ufs_dir.c
 *
 * Copyright (C) 1996
 * Adrian Rodriguez (adrian@franklins-tower.rutgers.edu)
 * Laboratory for Computer Science Research Computing Facility
 * Rutgers, The State University of New Jersey
 *
 * swab support by Francois-Rene Rideau <fare@tunes.org> 19970406
 *
 * 4.4BSD (FreeBSD) support added on February 1st 1998 by
 * Niels Kristian Bech Jensen <nkbj@image.dk> partially based
 * on code by Martin von Loewis <martin@mira.isdn.cs.tu-berlin.de>.
 *
 * Migration to usage of "page cache" on May 2006 by
 * Evgeniy Dushistov <dushistov@mail.ru> based on ext2 code base.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * NOTE! unlike strncmp, ufs_match returns 1 for success, 0 for failure.
 *
 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
 */
static inline int ufs_match(struct super_block *sb, int len,
		const unsigned char *name, struct ufs_dir_entry *de)
{
	if (len != ufs_get_de_namlen(sb, de))
		return 0;
	if (!de->d_ino)
		return 0;
	return !memcmp(name, de->d_name, len);
}

/*
 * Finish a write to a directory page: bump i_version, extend i_size if
 * needed, and either sync the page (DIRSYNC) or just unlock it.
 * Page is expected locked on entry.
 */
static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;

	dir->i_version++;
	block_write_end(NULL, mapping, pos, len, len, page, NULL);
	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}

/* Drop the kmap and page-cache reference taken by ufs_get_page() */
static inline void ufs_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

/* Number of page-cache pages spanned by the directory */
static inline unsigned long ufs_dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

/* Look up @qstr in @dir; return its inode number, or 0 if not found */
ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
{
	ino_t res = 0;
	struct ufs_dir_entry *de;
	struct page *page;

	de = ufs_find_entry(dir, qstr, &page);
	if (de) {
		res = fs32_to_cpu(dir->i_sb, de->d_ino);
		ufs_put_page(page);
	}
	return res;
}

/* Releases the page */
void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
		  struct page *page, struct inode *inode)
{
	loff_t pos = page_offset(page) +
			(char *) de - (char *) page_address(page);
	unsigned len = fs16_to_cpu(dir->i_sb, de->d_reclen);
	int err;

	lock_page(page);
	err = ufs_prepare_chunk(page, pos, len);
	BUG_ON(err);

	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
	ufs_set_de_type(dir->i_sb, de, inode->i_mode);

	err = ufs_commit_chunk(page, pos, len);
	ufs_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}

/*
 * Validate every entry on a freshly-read directory page.  On success
 * the page is marked Checked; on any structural error the page is
 * marked Checked + Error and a detailed ufs_error() is logged.
 */
static void ufs_check_page(struct page *page)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	char *kaddr = page_address(page);
	unsigned offs, rec_len;
	unsigned limit = PAGE_CACHE_SIZE;
	const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1;
	struct ufs_dir_entry *p;
	char *error;

	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_CACHE_MASK;
		if (limit & chunk_mask)
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - UFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct ufs_dir_entry *)(kaddr + offs);
		rec_len = fs16_to_cpu(sb, p->d_reclen);

		if (rec_len < UFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, p)))
			goto Enamelen;
		/* an entry must not cross a chunk boundary */
		if (((offs + rec_len - 1) ^ offs) & ~chunk_mask)
			goto Espan;
		if (fs32_to_cpu(sb, p->d_ino) > (UFS_SB(sb)->s_uspi->s_ipg *
						  UFS_SB(sb)->s_uspi->s_ncg))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return;

	/* Too bad, we had an error */

Ebadsize:
	ufs_error(sb, "ufs_check_page",
		  "size of directory #%lu is not a multiple of chunk size",
		  dir->i_ino
	);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - "
		   "offset=%lu, rec_len=%d, name_len=%d",
		   dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
		   rec_len, ufs_get_de_namlen(sb, p));
	goto fail;
Eend:
	p = (struct ufs_dir_entry *)(kaddr + offs);
	ufs_error(sb, __func__,
		   "entry in directory #%lu spans the page boundary"
		   "offset=%lu",
		   dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
fail:
	SetPageChecked(page);
	SetPageError(page);
}

/*
 * Read directory page @n, kmap it and validate it (once).  Returns the
 * mapped page or ERR_PTR(-EIO); caller releases with ufs_put_page().
 */
static struct page *ufs_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (!PageChecked(page))
			ufs_check_page(page);
		if (PageError(page))
			goto fail;
	}
	return page;

fail:
	ufs_put_page(page);
	return ERR_PTR(-EIO);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ufs_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_CACHE_SHIFT;
	if (last_byte > PAGE_CACHE_SIZE)
		last_byte = PAGE_CACHE_SIZE;
	return last_byte;
}

/* Advance to the next on-disk entry using this entry's rec_len */
static inline struct ufs_dir_entry *
ufs_next_entry(struct super_block *sb, struct ufs_dir_entry *p)
{
	return (struct ufs_dir_entry *)((char *)p +
					fs16_to_cpu(sb, p->d_reclen));
}

/* Return the ".." entry of @dir (second entry on page 0); sets *p */
struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
{
	struct page *page = ufs_get_page(dir, 0);
	struct ufs_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = ufs_next_entry(dir->i_sb,
				    (struct ufs_dir_entry *)page_address(page));
		*p = page;
	}
	return de;
}

/*
 *	ufs_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_dir). Page is returned mapped and unlocked.
 * Entry is guaranteed to be valid.
 */
struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
				     struct page **res_page)
{
	struct super_block *sb = dir->i_sb;
	const unsigned char *name = qstr->name;
	int namelen = qstr->len;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = ufs_dir_pages(dir);
	struct page *page = NULL;
	struct ufs_inode_info *ui = UFS_I(dir);
	struct ufs_dir_entry *de;

	UFSD("ENTER, dir_ino %lu, name %s, namlen %u\n", dir->i_ino, name, namelen);

	if (npages == 0 || namelen > UFS_MAXNAMLEN)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	/* resume the scan where the last lookup in this dir left off */
	start = ui->i_dir_start_lookup;

	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ufs_get_page(dir, n);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (struct ufs_dir_entry *) kaddr;
			kaddr += ufs_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (de->d_reclen == 0) {
					ufs_error(dir->i_sb, __func__,
						  "zero-length directory entry");
					ufs_put_page(page);
					goto out;
				}
				if (ufs_match(sb, namelen, name, de))
					goto found;
				de = ufs_next_entry(sb, de);
			}
			ufs_put_page(page);
		}
		if (++n >= npages)
			n = 0;
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	ui->i_dir_start_lookup = n;
	return de;
}

/*
 *	Parent is locked.
 */
int ufs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const unsigned char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block *sb = dir->i_sb;
	unsigned reclen = UFS_DIR_REC_LEN(namelen);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct ufs_dir_entry *de;
	unsigned long npages = ufs_dir_pages(dir);
	unsigned long n;
	char *kaddr;
	loff_t pos;
	int err;

	UFSD("ENTER, name %s, namelen %u\n", name, namelen);

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = ufs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + ufs_last_byte(dir, n);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += PAGE_CACHE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->d_reclen = cpu_to_fs16(sb, chunk_size);
				de->d_ino = 0;
				goto got_it;
			}
			if (de->d_reclen == 0) {
				ufs_error(dir->i_sb, __func__,
					  "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ufs_match(sb, namelen, name, de))
				goto out_unlock;
			name_len = UFS_DIR_REC_LEN(ufs_get_de_namlen(sb, de));
			rec_len = fs16_to_cpu(sb, de->d_reclen);
			if (!de->d_ino && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct ufs_dir_entry *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ufs_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) +
			(char*)de - (char*)page_address(page);
	err = ufs_prepare_chunk(page, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->d_ino) {
		/* split the live entry: new entry goes in its slack space */
		struct ufs_dir_entry *de1 =
			(struct ufs_dir_entry *) ((char *) de + name_len);
		de1->d_reclen = cpu_to_fs16(sb, rec_len - name_len);
		de->d_reclen = cpu_to_fs16(sb, name_len);

		de = de1;
	}

	ufs_set_de_namlen(sb, de, namelen);
	memcpy(de->d_name, name, namelen + 1);
	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);

	err = ufs_commit_chunk(page, pos, rec_len);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;

	mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	ufs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

/*
 * Starting from @base+@offset, back up to the chunk boundary and walk
 * forward to the first entry at or before @offset; returns its offset.
 * Used by readdir after i_version changes to re-anchor f_pos.
 */
static inline unsigned
ufs_validate_entry(struct super_block *sb, char *base,
		   unsigned offset, unsigned mask)
{
	struct ufs_dir_entry *de = (struct ufs_dir_entry*)(base + offset);
	struct ufs_dir_entry *p = (struct ufs_dir_entry*)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		if (p->d_reclen == 0)
			break;
		p = ufs_next_entry(sb, p);
	}
	return (char *)p - base;
}

/*
 * This is blatantly stolen from ext2fs
 */
static int
ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	loff_t pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = ufs_dir_pages(inode);
	unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	int need_revalidate = filp->f_version != inode->i_version;
	unsigned flags = UFS_SB(sb)->s_flags;

	UFSD("BEGIN\n");

	if (pos > inode->i_size - UFS_DIR_REC_LEN(1))
		return 0;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct ufs_dir_entry *de;

		struct page *page = ufs_get_page(inode, n);

		if (IS_ERR(page)) {
			ufs_error(sb, __func__,
				  "bad page in #%lu",
				  inode->i_ino);
			filp->f_pos += PAGE_CACHE_SIZE - offset;
			return -EIO;
		}
		kaddr = page_address(page);
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
				filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
			}
			filp->f_version = inode->i_version;
			need_revalidate = 0;
		}
		de = (struct ufs_dir_entry *)(kaddr+offset);
		limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
			if (de->d_reclen == 0) {
				ufs_error(sb, __func__,
					"zero-length directory entry");
				ufs_put_page(page);
				return -EIO;
			}
			if (de->d_ino) {
				int over;
				unsigned char d_type = DT_UNKNOWN;

				offset = (char *)de - kaddr;

				UFSD("filldir(%s,%u)\n", de->d_name,
				      fs32_to_cpu(sb, de->d_ino));
				UFSD("namlen %u\n", ufs_get_de_namlen(sb, de));

				if ((flags & UFS_DE_MASK) == UFS_DE_44BSD)
					d_type = de->d_u.d_44.d_type;

				over = filldir(dirent, de->d_name,
					       ufs_get_de_namlen(sb, de),
						(n<<PAGE_CACHE_SHIFT) | offset,
					       fs32_to_cpu(sb, de->d_ino), d_type);
				if (over) {
					ufs_put_page(page);
					return 0;
				}
			}
			filp->f_pos += fs16_to_cpu(sb, de->d_reclen);
		}
		ufs_put_page(page);
	}
	return 0;
}

/*
 * ufs_delete_entry deletes a directory entry by merging it with the
 * previous entry.
 */
int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
		     struct page * page)
{
	struct super_block *sb = inode->i_sb;
	char *kaddr = page_address(page);
	unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
	unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
	loff_t pos;
	struct ufs_dir_entry *pde = NULL;
	struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from);
	int err;

	UFSD("ENTER\n");

	UFSD("ino %u, reclen %u, namlen %u, name %s\n",
	      fs32_to_cpu(sb, de->d_ino),
	      fs16_to_cpu(sb, de->d_reclen),
	      ufs_get_de_namlen(sb, de), de->d_name);

	/* find the entry immediately preceding the victim in this chunk */
	while ((char*)de < (char*)dir) {
		if (de->d_reclen == 0) {
			ufs_error(inode->i_sb, __func__,
				  "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ufs_next_entry(sb, de);
	}
	if (pde)
		from = (char*)pde - (char*)page_address(page);

	pos = page_offset(page) + from;
	lock_page(page);
	err = ufs_prepare_chunk(page, pos, to - from);
	BUG_ON(err);
	if (pde)
		pde->d_reclen = cpu_to_fs16(sb, to - from);
	dir->d_ino = 0;
	err = ufs_commit_chunk(page, pos, to - from);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	ufs_put_page(page);
	UFSD("EXIT\n");
	return err;
}

/* Create the "." and ".." entries in a brand-new directory inode */
int ufs_make_empty(struct inode * inode, struct inode *dir)
{
	struct super_block * sb = dir->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	const unsigned int chunk_size = UFS_SB(sb)->s_uspi->s_dirblksize;
	struct ufs_dir_entry * de;
	char *base;
	int err;

	if (!page)
		return -ENOMEM;

	err = ufs_prepare_chunk(page, 0, chunk_size);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kmap(page);
	base = (char*)page_address(page);
	memset(base, 0, PAGE_CACHE_SIZE);

	de = (struct ufs_dir_entry *) base;

	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
	ufs_set_de_type(sb, de, inode->i_mode);
	ufs_set_de_namlen(sb, de, 1);
	de->d_reclen = cpu_to_fs16(sb, UFS_DIR_REC_LEN(1));
	strcpy (de->d_name, ".");
	de = (struct ufs_dir_entry *)
		((char *)de + fs16_to_cpu(sb, de->d_reclen));
	de->d_ino = cpu_to_fs32(sb, dir->i_ino);
	ufs_set_de_type(sb, de, dir->i_mode);
	de->d_reclen = cpu_to_fs16(sb, chunk_size - UFS_DIR_REC_LEN(1));
	ufs_set_de_namlen(sb, de, 2);
	strcpy (de->d_name, "..");
	kunmap(page);

	err = ufs_commit_chunk(page, 0, chunk_size);
fail:
	page_cache_release(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ufs_empty_dir(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct page *page = NULL;
	unsigned long i, npages = ufs_dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct ufs_dir_entry *de;
		page = ufs_get_page(inode, i);

		if (IS_ERR(page))
			continue;

		kaddr = page_address(page);
		de = (struct ufs_dir_entry *)kaddr;
		kaddr += ufs_last_byte(inode, i) - UFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->d_reclen == 0) {
				ufs_error(inode->i_sb, __func__,
					"zero-length directory entry: "
					"kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->d_ino) {
				u16 namelen=ufs_get_de_namlen(sb, de);
				/* check for . and .. */
				if (de->d_name[0] != '.')
					goto not_empty;
				if (namelen > 2)
					goto not_empty;
				if (namelen < 2) {
					if (inode->i_ino !=
					    fs32_to_cpu(sb, de->d_ino))
						goto not_empty;
				} else if (de->d_name[1] != '.')
					goto not_empty;
			}
			de = ufs_next_entry(sb, de);
		}
		ufs_put_page(page);
	}
	return 1;

not_empty:
	ufs_put_page(page);
	return 0;
}

const struct file_operations ufs_dir_operations = {
	.read		= generic_read_dir,
	.readdir	= ufs_readdir,
	.fsync		= generic_file_fsync,
	.llseek		= generic_file_llseek,
};
gpl-2.0
laitianli/loongson-linux-2.6.36-3_mips
arch/x86/boot/mkcpustr.c
9048
1251
/* ----------------------------------------------------------------------- * * * Copyright 2008 rPath, Inc. - All Rights Reserved * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2 or (at your * option) any later version; incorporated herein by reference. * * ----------------------------------------------------------------------- */ /* * This is a host program to preprocess the CPU strings into a * compact format suitable for the setup code. */ #include <stdio.h> #include "../kernel/cpu/capflags.c" int main(void) { int i, j; const char *str; printf("static const char x86_cap_strs[] =\n"); for (i = 0; i < NCAPINTS; i++) { for (j = 0; j < 32; j++) { str = x86_cap_flags[i*32+j]; if (i == NCAPINTS-1 && j == 31) { /* The last entry must be unconditional; this also consumes the compiler-added null character */ if (!str) str = ""; printf("\t\"\\x%02x\\x%02x\"\"%s\"\n", i, j, str); } else if (str) { printf("#if REQUIRED_MASK%d & (1 << %d)\n" "\t\"\\x%02x\\x%02x\"\"%s\\0\"\n" "#endif\n", i, j, i, j, str); } } } printf("\t;\n"); return 0; }
gpl-2.0
kcarden/android_kernel_lge_msm8916
kernel/gcov/fs.c
10584
19303
/* * This code exports profiling data as debugfs files to userspace. * * Copyright IBM Corp. 2009 * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com> * * Uses gcc-internal data definitions. * Based on the gcov-kernel patch by: * Hubertus Franke <frankeh@us.ibm.com> * Nigel Hinds <nhinds@us.ibm.com> * Rajan Ravindran <rajancr@us.ibm.com> * Peter Oberparleiter <oberpar@linux.vnet.ibm.com> * Paul Larson * Yi CDL Yang */ #define pr_fmt(fmt) "gcov: " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include "gcov.h" /** * struct gcov_node - represents a debugfs entry * @list: list head for child node list * @children: child nodes * @all: list head for list of all nodes * @parent: parent node * @loaded_info: array of pointers to profiling data sets for loaded object * files. * @num_loaded: number of profiling data sets for loaded object files. * @unloaded_info: accumulated copy of profiling data sets for unloaded * object files. Used only when gcov_persist=1. * @dentry: main debugfs entry, either a directory or data file * @links: associated symbolic links * @name: data file basename * * struct gcov_node represents an entity within the gcov/ subdirectory * of debugfs. There are directory and data file nodes. The latter represent * the actual synthesized data file plus any associated symbolic links which * are needed by the gcov tool to work correctly. 
*/ struct gcov_node { struct list_head list; struct list_head children; struct list_head all; struct gcov_node *parent; struct gcov_info **loaded_info; struct gcov_info *unloaded_info; struct dentry *dentry; struct dentry **links; int num_loaded; char name[0]; }; static const char objtree[] = OBJTREE; static const char srctree[] = SRCTREE; static struct gcov_node root_node; static struct dentry *reset_dentry; static LIST_HEAD(all_head); static DEFINE_MUTEX(node_lock); /* If non-zero, keep copies of profiling data for unloaded modules. */ static int gcov_persist = 1; static int __init gcov_persist_setup(char *str) { unsigned long val; if (strict_strtoul(str, 0, &val)) { pr_warning("invalid gcov_persist parameter '%s'\n", str); return 0; } gcov_persist = val; pr_info("setting gcov_persist to %d\n", gcov_persist); return 1; } __setup("gcov_persist=", gcov_persist_setup); /* * seq_file.start() implementation for gcov data files. Note that the * gcov_iterator interface is designed to be more restrictive than seq_file * (no start from arbitrary position, etc.), to simplify the iterator * implementation. */ static void *gcov_seq_start(struct seq_file *seq, loff_t *pos) { loff_t i; gcov_iter_start(seq->private); for (i = 0; i < *pos; i++) { if (gcov_iter_next(seq->private)) return NULL; } return seq->private; } /* seq_file.next() implementation for gcov data files. */ static void *gcov_seq_next(struct seq_file *seq, void *data, loff_t *pos) { struct gcov_iterator *iter = data; if (gcov_iter_next(iter)) return NULL; (*pos)++; return iter; } /* seq_file.show() implementation for gcov data files. */ static int gcov_seq_show(struct seq_file *seq, void *data) { struct gcov_iterator *iter = data; if (gcov_iter_write(iter, seq)) return -EINVAL; return 0; } static void gcov_seq_stop(struct seq_file *seq, void *data) { /* Unused. 
*/ } static const struct seq_operations gcov_seq_ops = { .start = gcov_seq_start, .next = gcov_seq_next, .show = gcov_seq_show, .stop = gcov_seq_stop, }; /* * Return a profiling data set associated with the given node. This is * either a data set for a loaded object file or a data set copy in case * all associated object files have been unloaded. */ static struct gcov_info *get_node_info(struct gcov_node *node) { if (node->num_loaded > 0) return node->loaded_info[0]; return node->unloaded_info; } /* * Return a newly allocated profiling data set which contains the sum of * all profiling data associated with the given node. */ static struct gcov_info *get_accumulated_info(struct gcov_node *node) { struct gcov_info *info; int i = 0; if (node->unloaded_info) info = gcov_info_dup(node->unloaded_info); else info = gcov_info_dup(node->loaded_info[i++]); if (!info) return NULL; for (; i < node->num_loaded; i++) gcov_info_add(info, node->loaded_info[i]); return info; } /* * open() implementation for gcov data files. Create a copy of the profiling * data set and initialize the iterator and seq_file interface. */ static int gcov_seq_open(struct inode *inode, struct file *file) { struct gcov_node *node = inode->i_private; struct gcov_iterator *iter; struct seq_file *seq; struct gcov_info *info; int rc = -ENOMEM; mutex_lock(&node_lock); /* * Read from a profiling data copy to minimize reference tracking * complexity and concurrent access and to keep accumulating multiple * profiling data sets associated with one node simple. */ info = get_accumulated_info(node); if (!info) goto out_unlock; iter = gcov_iter_new(info); if (!iter) goto err_free_info; rc = seq_open(file, &gcov_seq_ops); if (rc) goto err_free_iter_info; seq = file->private_data; seq->private = iter; out_unlock: mutex_unlock(&node_lock); return rc; err_free_iter_info: gcov_iter_free(iter); err_free_info: gcov_info_free(info); goto out_unlock; } /* * release() implementation for gcov data files. 
Release resources allocated * by open(). */ static int gcov_seq_release(struct inode *inode, struct file *file) { struct gcov_iterator *iter; struct gcov_info *info; struct seq_file *seq; seq = file->private_data; iter = seq->private; info = gcov_iter_get_info(iter); gcov_iter_free(iter); gcov_info_free(info); seq_release(inode, file); return 0; } /* * Find a node by the associated data file name. Needs to be called with * node_lock held. */ static struct gcov_node *get_node_by_name(const char *name) { struct gcov_node *node; struct gcov_info *info; list_for_each_entry(node, &all_head, all) { info = get_node_info(node); if (info && (strcmp(info->filename, name) == 0)) return node; } return NULL; } /* * Reset all profiling data associated with the specified node. */ static void reset_node(struct gcov_node *node) { int i; if (node->unloaded_info) gcov_info_reset(node->unloaded_info); for (i = 0; i < node->num_loaded; i++) gcov_info_reset(node->loaded_info[i]); } static void remove_node(struct gcov_node *node); /* * write() implementation for gcov data files. Reset profiling data for the * corresponding file. If all associated object files have been unloaded, * remove the debug fs node as well. */ static ssize_t gcov_seq_write(struct file *file, const char __user *addr, size_t len, loff_t *pos) { struct seq_file *seq; struct gcov_info *info; struct gcov_node *node; seq = file->private_data; info = gcov_iter_get_info(seq->private); mutex_lock(&node_lock); node = get_node_by_name(info->filename); if (node) { /* Reset counts or remove node for unloaded modules. */ if (node->num_loaded == 0) remove_node(node); else reset_node(node); } /* Reset counts for open file. 
*/ gcov_info_reset(info); mutex_unlock(&node_lock); return len; } /* * Given a string <path> representing a file path of format: * path/to/file.gcda * construct and return a new string: * <dir/>path/to/file.<ext> */ static char *link_target(const char *dir, const char *path, const char *ext) { char *target; char *old_ext; char *copy; copy = kstrdup(path, GFP_KERNEL); if (!copy) return NULL; old_ext = strrchr(copy, '.'); if (old_ext) *old_ext = '\0'; if (dir) target = kasprintf(GFP_KERNEL, "%s/%s.%s", dir, copy, ext); else target = kasprintf(GFP_KERNEL, "%s.%s", copy, ext); kfree(copy); return target; } /* * Construct a string representing the symbolic link target for the given * gcov data file name and link type. Depending on the link type and the * location of the data file, the link target can either point to a * subdirectory of srctree, objtree or in an external location. */ static char *get_link_target(const char *filename, const struct gcov_link *ext) { const char *rel; char *result; if (strncmp(filename, objtree, strlen(objtree)) == 0) { rel = filename + strlen(objtree) + 1; if (ext->dir == SRC_TREE) result = link_target(srctree, rel, ext->ext); else result = link_target(objtree, rel, ext->ext); } else { /* External compilation. */ result = link_target(NULL, filename, ext->ext); } return result; } #define SKEW_PREFIX ".tmp_" /* * For a filename .tmp_filename.ext return filename.ext. Needed to compensate * for filename skewing caused by the mod-versioning mechanism. */ static const char *deskew(const char *basename) { if (strncmp(basename, SKEW_PREFIX, sizeof(SKEW_PREFIX) - 1) == 0) return basename + sizeof(SKEW_PREFIX) - 1; return basename; } /* * Create links to additional files (usually .c and .gcno files) which the * gcov tool expects to find in the same directory as the gcov data file. */ static void add_links(struct gcov_node *node, struct dentry *parent) { char *basename; char *target; int num; int i; for (num = 0; gcov_link[num].ext; num++) /* Nothing. 
*/; node->links = kcalloc(num, sizeof(struct dentry *), GFP_KERNEL); if (!node->links) return; for (i = 0; i < num; i++) { target = get_link_target(get_node_info(node)->filename, &gcov_link[i]); if (!target) goto out_err; basename = strrchr(target, '/'); if (!basename) goto out_err; basename++; node->links[i] = debugfs_create_symlink(deskew(basename), parent, target); if (!node->links[i]) goto out_err; kfree(target); } return; out_err: kfree(target); while (i-- > 0) debugfs_remove(node->links[i]); kfree(node->links); node->links = NULL; } static const struct file_operations gcov_data_fops = { .open = gcov_seq_open, .release = gcov_seq_release, .read = seq_read, .llseek = seq_lseek, .write = gcov_seq_write, }; /* Basic initialization of a new node. */ static void init_node(struct gcov_node *node, struct gcov_info *info, const char *name, struct gcov_node *parent) { INIT_LIST_HEAD(&node->list); INIT_LIST_HEAD(&node->children); INIT_LIST_HEAD(&node->all); if (node->loaded_info) { node->loaded_info[0] = info; node->num_loaded = 1; } node->parent = parent; if (name) strcpy(node->name, name); } /* * Create a new node and associated debugfs entry. Needs to be called with * node_lock held. */ static struct gcov_node *new_node(struct gcov_node *parent, struct gcov_info *info, const char *name) { struct gcov_node *node; node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); if (!node) goto err_nomem; if (info) { node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), GFP_KERNEL); if (!node->loaded_info) goto err_nomem; } init_node(node, info, name, parent); /* Differentiate between gcov data file nodes and directory nodes. 
*/ if (info) { node->dentry = debugfs_create_file(deskew(node->name), 0600, parent->dentry, node, &gcov_data_fops); } else node->dentry = debugfs_create_dir(node->name, parent->dentry); if (!node->dentry) { pr_warning("could not create file\n"); kfree(node); return NULL; } if (info) add_links(node, parent->dentry); list_add(&node->list, &parent->children); list_add(&node->all, &all_head); return node; err_nomem: kfree(node); pr_warning("out of memory\n"); return NULL; } /* Remove symbolic links associated with node. */ static void remove_links(struct gcov_node *node) { int i; if (!node->links) return; for (i = 0; gcov_link[i].ext; i++) debugfs_remove(node->links[i]); kfree(node->links); node->links = NULL; } /* * Remove node from all lists and debugfs and release associated resources. * Needs to be called with node_lock held. */ static void release_node(struct gcov_node *node) { list_del(&node->list); list_del(&node->all); debugfs_remove(node->dentry); remove_links(node); kfree(node->loaded_info); if (node->unloaded_info) gcov_info_free(node->unloaded_info); kfree(node); } /* Release node and empty parents. Needs to be called with node_lock held. */ static void remove_node(struct gcov_node *node) { struct gcov_node *parent; while ((node != &root_node) && list_empty(&node->children)) { parent = node->parent; release_node(node); node = parent; } } /* * Find child node with given basename. Needs to be called with node_lock * held. */ static struct gcov_node *get_child_by_name(struct gcov_node *parent, const char *name) { struct gcov_node *node; list_for_each_entry(node, &parent->children, list) { if (strcmp(node->name, name) == 0) return node; } return NULL; } /* * write() implementation for reset file. Reset all profiling data to zero * and remove nodes for which all associated object files are unloaded. 
*/ static ssize_t reset_write(struct file *file, const char __user *addr, size_t len, loff_t *pos) { struct gcov_node *node; mutex_lock(&node_lock); restart: list_for_each_entry(node, &all_head, all) { if (node->num_loaded > 0) reset_node(node); else if (list_empty(&node->children)) { remove_node(node); /* Several nodes may have gone - restart loop. */ goto restart; } } mutex_unlock(&node_lock); return len; } /* read() implementation for reset file. Unused. */ static ssize_t reset_read(struct file *file, char __user *addr, size_t len, loff_t *pos) { /* Allow read operation so that a recursive copy won't fail. */ return 0; } static const struct file_operations gcov_reset_fops = { .write = reset_write, .read = reset_read, .llseek = noop_llseek, }; /* * Create a node for a given profiling data set and add it to all lists and * debugfs. Needs to be called with node_lock held. */ static void add_node(struct gcov_info *info) { char *filename; char *curr; char *next; struct gcov_node *parent; struct gcov_node *node; filename = kstrdup(info->filename, GFP_KERNEL); if (!filename) return; parent = &root_node; /* Create directory nodes along the path. */ for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) { if (curr == next) continue; *next = 0; if (strcmp(curr, ".") == 0) continue; if (strcmp(curr, "..") == 0) { if (!parent->parent) goto err_remove; parent = parent->parent; continue; } node = get_child_by_name(parent, curr); if (!node) { node = new_node(parent, NULL, curr); if (!node) goto err_remove; } parent = node; } /* Create file node. */ node = new_node(parent, info, curr); if (!node) goto err_remove; out: kfree(filename); return; err_remove: remove_node(parent); goto out; } /* * Associate a profiling data set with an existing node. Needs to be called * with node_lock held. */ static void add_info(struct gcov_node *node, struct gcov_info *info) { struct gcov_info **loaded_info; int num = node->num_loaded; /* * Prepare new array. 
This is done first to simplify cleanup in * case the new data set is incompatible, the node only contains * unloaded data sets and there's not enough memory for the array. */ loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL); if (!loaded_info) { pr_warning("could not add '%s' (out of memory)\n", info->filename); return; } memcpy(loaded_info, node->loaded_info, num * sizeof(struct gcov_info *)); loaded_info[num] = info; /* Check if the new data set is compatible. */ if (num == 0) { /* * A module was unloaded, modified and reloaded. The new * data set replaces the copy of the last one. */ if (!gcov_info_is_compatible(node->unloaded_info, info)) { pr_warning("discarding saved data for %s " "(incompatible version)\n", info->filename); gcov_info_free(node->unloaded_info); node->unloaded_info = NULL; } } else { /* * Two different versions of the same object file are loaded. * The initial one takes precedence. */ if (!gcov_info_is_compatible(node->loaded_info[0], info)) { pr_warning("could not add '%s' (incompatible " "version)\n", info->filename); kfree(loaded_info); return; } } /* Overwrite previous array. */ kfree(node->loaded_info); node->loaded_info = loaded_info; node->num_loaded = num + 1; } /* * Return the index of a profiling data set associated with a node. */ static int get_info_index(struct gcov_node *node, struct gcov_info *info) { int i; for (i = 0; i < node->num_loaded; i++) { if (node->loaded_info[i] == info) return i; } return -ENOENT; } /* * Save the data of a profiling data set which is being unloaded. */ static void save_info(struct gcov_node *node, struct gcov_info *info) { if (node->unloaded_info) gcov_info_add(node->unloaded_info, info); else { node->unloaded_info = gcov_info_dup(info); if (!node->unloaded_info) { pr_warning("could not save data for '%s' " "(out of memory)\n", info->filename); } } } /* * Disassociate a profiling data set from a node. Needs to be called with * node_lock held. 
*/ static void remove_info(struct gcov_node *node, struct gcov_info *info) { int i; i = get_info_index(node, info); if (i < 0) { pr_warning("could not remove '%s' (not found)\n", info->filename); return; } if (gcov_persist) save_info(node, info); /* Shrink array. */ node->loaded_info[i] = node->loaded_info[node->num_loaded - 1]; node->num_loaded--; if (node->num_loaded > 0) return; /* Last loaded data set was removed. */ kfree(node->loaded_info); node->loaded_info = NULL; node->num_loaded = 0; if (!node->unloaded_info) remove_node(node); } /* * Callback to create/remove profiling files when code compiled with * -fprofile-arcs is loaded/unloaded. */ void gcov_event(enum gcov_action action, struct gcov_info *info) { struct gcov_node *node; mutex_lock(&node_lock); node = get_node_by_name(info->filename); switch (action) { case GCOV_ADD: if (node) add_info(node, info); else add_node(info); break; case GCOV_REMOVE: if (node) remove_info(node, info); else { pr_warning("could not remove '%s' (not found)\n", info->filename); } break; } mutex_unlock(&node_lock); } /* Create debugfs entries. */ static __init int gcov_fs_init(void) { int rc = -EIO; init_node(&root_node, NULL, NULL, NULL); /* * /sys/kernel/debug/gcov will be parent for the reset control file * and all profiling files. */ root_node.dentry = debugfs_create_dir("gcov", NULL); if (!root_node.dentry) goto err_remove; /* * Create reset file which resets all profiling counts when written * to. */ reset_dentry = debugfs_create_file("reset", 0600, root_node.dentry, NULL, &gcov_reset_fops); if (!reset_dentry) goto err_remove; /* Replay previous events to get our fs hierarchy up-to-date. */ gcov_enable_events(); return 0; err_remove: pr_err("init failed\n"); if (root_node.dentry) debugfs_remove(root_node.dentry); return rc; } device_initcall(gcov_fs_init);
gpl-2.0
JoinTheRealms/TF700-dualboot-hunds
arch/powerpc/boot/cuboot-c2k.c
13656
4884
/* * GEFanuc C2K platform code. * * Author: Remi Machet <rmachet@slac.stanford.edu> * * Originated from prpmc2800.c * * 2008 (c) Stanford University * 2007 (c) MontaVista, Software, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "types.h" #include "stdio.h" #include "io.h" #include "ops.h" #include "elf.h" #include "gunzip_util.h" #include "mv64x60.h" #include "cuboot.h" #include "ppcboot.h" static u8 *bridge_base; static void c2k_bridge_setup(u32 mem_size) { u32 i, v[30], enables, acc_bits; u32 pci_base_hi, pci_base_lo, size, buf[2]; unsigned long cpu_base; int rc; void *devp, *mv64x60_devp; u8 *bridge_pbase, is_coherent; struct mv64x60_cpu2pci_win *tbl; int bus; bridge_pbase = mv64x60_get_bridge_pbase(); is_coherent = mv64x60_is_coherent(); if (is_coherent) acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_WB | MV64x60_PCI_ACC_CNTL_SWAP_NONE | MV64x60_PCI_ACC_CNTL_MBURST_32_BYTES | MV64x60_PCI_ACC_CNTL_RDSIZE_32_BYTES; else acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_NONE | MV64x60_PCI_ACC_CNTL_SWAP_NONE | MV64x60_PCI_ACC_CNTL_MBURST_128_BYTES | MV64x60_PCI_ACC_CNTL_RDSIZE_256_BYTES; mv64x60_config_ctlr_windows(bridge_base, bridge_pbase, is_coherent); mv64x60_devp = find_node_by_compatible(NULL, "marvell,mv64360"); if (mv64x60_devp == NULL) fatal("Error: Missing marvell,mv64360 device tree node\n\r"); enables = in_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE)); enables |= 0x007ffe00; /* Disable all cpu->pci windows */ out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), enables); /* Get the cpu -> pci i/o & mem mappings from the device tree */ devp = NULL; for (bus = 0; ; bus++) { char name[] = "pci "; name[strlen(name)-1] = bus+'0'; devp = find_node_by_alias(name); if (devp == NULL) break; if (bus >= 2) fatal("Error: Only 2 PCI controllers are supported at" \ " this time.\n"); mv64x60_config_pci_windows(bridge_base, 
bridge_pbase, bus, 0, mem_size, acc_bits); rc = getprop(devp, "ranges", v, sizeof(v)); if (rc == 0) fatal("Error: Can't find marvell,mv64360-pci ranges" " property\n\r"); /* Get the cpu -> pci i/o & mem mappings from the device tree */ for (i = 0; i < rc; i += 6) { switch (v[i] & 0xff000000) { case 0x01000000: /* PCI I/O Space */ tbl = mv64x60_cpu2pci_io; break; case 0x02000000: /* PCI MEM Space */ tbl = mv64x60_cpu2pci_mem; break; default: continue; } pci_base_hi = v[i+1]; pci_base_lo = v[i+2]; cpu_base = v[i+3]; size = v[i+5]; buf[0] = cpu_base; buf[1] = size; if (!dt_xlate_addr(devp, buf, sizeof(buf), &cpu_base)) fatal("Error: Can't translate PCI address " \ "0x%x\n\r", (u32)cpu_base); mv64x60_config_cpu2pci_window(bridge_base, bus, pci_base_hi, pci_base_lo, cpu_base, size, tbl); } enables &= ~(3<<(9+bus*5)); /* Enable cpu->pci<bus> i/o, cpu->pci<bus> mem0 */ out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), enables); }; } static void c2k_fixups(void) { u32 mem_size; mem_size = mv64x60_get_mem_size(bridge_base); c2k_bridge_setup(mem_size); /* Do necessary bridge setup */ } #define MV64x60_MPP_CNTL_0 0xf000 #define MV64x60_MPP_CNTL_2 0xf008 #define MV64x60_GPP_IO_CNTL 0xf100 #define MV64x60_GPP_LEVEL_CNTL 0xf110 #define MV64x60_GPP_VALUE_SET 0xf118 static void c2k_reset(void) { u32 temp; udelay(5000000); if (bridge_base != 0) { temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0)); temp &= 0xFFFF0FFF; out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL)); temp |= 0x00000004; out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL)); temp |= 0x00000004; out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2)); temp &= 0xFFFF0FFF; out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL)); temp |= 
0x00080000; out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp); temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL)); temp |= 0x00080000; out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp); out_le32((u32 *)(bridge_base + MV64x60_GPP_VALUE_SET), 0x00080004); } for (;;); } static bd_t bd; void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); fdt_init(_dtb_start); bridge_base = mv64x60_get_bridge_base(); platform_ops.fixups = c2k_fixups; platform_ops.exit = c2k_reset; if (serial_console_init() < 0) exit(); }
gpl-2.0
cyandream-devices/neak-n7100-jb
arch/powerpc/boot/cuboot-c2k.c
13656
4884
/*
 * GEFanuc C2K platform code.
 *
 * Author: Remi Machet <rmachet@slac.stanford.edu>
 *
 * Originated from prpmc2800.c
 *
 * 2008 (c) Stanford University
 * 2007 (c) MontaVista, Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "types.h"
#include "stdio.h"
#include "io.h"
#include "ops.h"
#include "elf.h"
#include "gunzip_util.h"
#include "mv64x60.h"
#include "cuboot.h"
#include "ppcboot.h"

/* Virtual base of the MV64x60 host bridge registers; set in platform_init(). */
static u8 *bridge_base;

/*
 * Configure the MV64x60 bridge windows and both PCI controllers.
 *
 * Walks the "pci0"/"pci1" device-tree aliases, programs the CPU->PCI
 * I/O and memory windows from each node's "ranges" property, and then
 * enables the corresponding window-enable bits in the bridge BAR
 * enable register.  Calls fatal() (no return) on any missing node or
 * untranslatable address.
 */
static void c2k_bridge_setup(u32 mem_size)
{
	u32 i, v[30], enables, acc_bits;
	u32 pci_base_hi, pci_base_lo, size, buf[2];
	unsigned long cpu_base;
	int rc;
	void *devp, *mv64x60_devp;
	u8 *bridge_pbase, is_coherent;
	struct mv64x60_cpu2pci_win *tbl;
	int bus;

	bridge_pbase = mv64x60_get_bridge_pbase();
	is_coherent = mv64x60_is_coherent();

	/* PCI access-control bits differ for cache-coherent systems */
	if (is_coherent)
		acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_WB |
			MV64x60_PCI_ACC_CNTL_SWAP_NONE |
			MV64x60_PCI_ACC_CNTL_MBURST_32_BYTES |
			MV64x60_PCI_ACC_CNTL_RDSIZE_32_BYTES;
	else
		acc_bits = MV64x60_PCI_ACC_CNTL_SNOOP_NONE |
			MV64x60_PCI_ACC_CNTL_SWAP_NONE |
			MV64x60_PCI_ACC_CNTL_MBURST_128_BYTES |
			MV64x60_PCI_ACC_CNTL_RDSIZE_256_BYTES;

	mv64x60_config_ctlr_windows(bridge_base, bridge_pbase, is_coherent);
	mv64x60_devp = find_node_by_compatible(NULL, "marvell,mv64360");
	if (mv64x60_devp == NULL)
		fatal("Error: Missing marvell,mv64360 device tree node\n\r");

	enables = in_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE));
	enables |= 0x007ffe00;	/* Disable all cpu->pci windows */
	out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE), enables);

	/* Get the cpu -> pci i/o & mem mappings from the device tree */
	devp = NULL;
	for (bus = 0; ; bus++) {
		/* Build the alias name "pci<bus>" by patching the last char */
		char name[] = "pci ";

		name[strlen(name)-1] = bus+'0';

		devp = find_node_by_alias(name);
		if (devp == NULL)
			break;

		if (bus >= 2)
			fatal("Error: Only 2 PCI controllers are supported at" \
				" this time.\n");

		mv64x60_config_pci_windows(bridge_base, bridge_pbase, bus, 0,
				mem_size, acc_bits);

		rc = getprop(devp, "ranges", v, sizeof(v));
		if (rc == 0)
			fatal("Error: Can't find marvell,mv64360-pci ranges"
				" property\n\r");

		/*
		 * Get the cpu -> pci i/o & mem mappings from the device tree.
		 * NOTE(review): rc is the length returned by getprop and the
		 * loop steps 6 cells per "ranges" entry — confirm getprop's
		 * unit (bytes vs cells) matches this stride.
		 */

		for (i = 0; i < rc; i += 6) {
			/* High byte of the first cell selects the space type */
			switch (v[i] & 0xff000000) {
			case 0x01000000: /* PCI I/O Space */
				tbl = mv64x60_cpu2pci_io;
				break;
			case 0x02000000: /* PCI MEM Space */
				tbl = mv64x60_cpu2pci_mem;
				break;
			default:
				continue;
			}

			pci_base_hi = v[i+1];
			pci_base_lo = v[i+2];
			cpu_base = v[i+3];
			size = v[i+5];

			buf[0] = cpu_base;
			buf[1] = size;

			if (!dt_xlate_addr(devp, buf, sizeof(buf), &cpu_base))
				fatal("Error: Can't translate PCI address " \
						"0x%x\n\r", (u32)cpu_base);

			mv64x60_config_cpu2pci_window(bridge_base, bus,
					pci_base_hi, pci_base_lo, cpu_base,
					size, tbl);
		}

		enables &= ~(3<<(9+bus*5)); /* Enable cpu->pci<bus> i/o,
					       cpu->pci<bus> mem0 */
		out_le32((u32 *)(bridge_base + MV64x60_CPU_BAR_ENABLE),
				enables);
	};
}

/* Platform fixup hook: size memory and set up the bridge before boot. */
static void c2k_fixups(void)
{
	u32 mem_size;

	mem_size = mv64x60_get_mem_size(bridge_base);
	c2k_bridge_setup(mem_size); /* Do necessary bridge setup */
}

/* MV64x60 GPIO / multi-purpose-pin register offsets used for reset */
#define MV64x60_MPP_CNTL_0	0xf000
#define MV64x60_MPP_CNTL_2	0xf008
#define MV64x60_GPP_IO_CNTL	0xf100
#define MV64x60_GPP_LEVEL_CNTL	0xf110
#define MV64x60_GPP_VALUE_SET	0xf118

/*
 * Board reset: route MPP pins 2 and 19 to GPIO, configure them as
 * active-high outputs, then assert both via the GPP value-set register.
 * Spins forever afterwards waiting for the hardware reset to take effect.
 */
static void c2k_reset(void)
{
	u32 temp;

	/* Let the settle before asserting reset (5 s delay) */
	udelay(5000000);

	if (bridge_base != 0) {
		temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0));
		temp &= 0xFFFF0FFF;	/* select GPIO function on MPP2 */
		out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_0), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL));
		temp |= 0x00000004;	/* GPP2 active level */
		out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL));
		temp |= 0x00000004;	/* GPP2 as output */
		out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2));
		temp &= 0xFFFF0FFF;	/* select GPIO function on MPP19 */
		out_le32((u32 *)(bridge_base + MV64x60_MPP_CNTL_2), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL));
		temp |= 0x00080000;	/* GPP19 active level */
		out_le32((u32 *)(bridge_base + MV64x60_GPP_LEVEL_CNTL), temp);

		temp = in_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL));
		temp |= 0x00080000;	/* GPP19 as output */
		out_le32((u32 *)(bridge_base + MV64x60_GPP_IO_CNTL), temp);

		/* Assert both reset GPIOs at once */
		out_le32((u32 *)(bridge_base + MV64x60_GPP_VALUE_SET),
				0x00080004);
	}

	for (;;);	/* wait for the reset to happen */
}

/* U-Boot board-info block filled in by CUBOOT_INIT() */
static bd_t bd;

/*
 * Bootwrapper entry: capture U-Boot parameters, initialize the flattened
 * device tree, locate the bridge, and install the platform hooks.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();

	fdt_init(_dtb_start);

	bridge_base = mv64x60_get_bridge_base();

	platform_ops.fixups = c2k_fixups;
	platform_ops.exit = c2k_reset;

	if (serial_console_init() < 0)
		exit();
}
gpl-2.0
icot/mysql-5.6.21
storage/ndb/src/common/portlib/NdbTick.c
89
5057
/* Copyright (C) 2003-2008 MySQL AB, 2009 Sun Microsystems, Inc.
   All rights reserved. Use is subject to license terms.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA */

#include <ndb_global.h>
#include <NdbTick.h>

/* Unit-conversion constants used by all the tick helpers below */
#define NANOSEC_PER_SEC 1000000000
#define MICROSEC_PER_SEC 1000000
#define MILLISEC_PER_SEC 1000
#define MICROSEC_PER_MILLISEC 1000
#define NANOSEC_PER_MILLISEC 1000000
#define NANOSEC_PER_MICROSEC 1000

#ifdef HAVE_CLOCK_GETTIME

/* Prefer the monotonic clock when available; fall back to wall time */
#ifdef CLOCK_MONOTONIC
static clockid_t NdbTick_clk_id = CLOCK_MONOTONIC;
#else
static clockid_t NdbTick_clk_id = CLOCK_REALTIME;
#endif

/*
 * Choose and validate the clock source.  If a monotonic clock is not
 * required (or CLOCK_MONOTONIC fails), fall back to CLOCK_REALTIME;
 * if even that fails, abort — time is essential to the process.
 */
void NdbTick_Init(int need_monotonic)
{
  struct timespec tick_time;

  if (!need_monotonic)
    NdbTick_clk_id = CLOCK_REALTIME;

  if (clock_gettime(NdbTick_clk_id, &tick_time) == 0)
    return;
#ifdef CLOCK_MONOTONIC
  fprintf(stderr, "Failed to use CLOCK_MONOTONIC for clock_realtime,"
          " errno= %u\n", errno);
  fflush(stderr);
  NdbTick_clk_id = CLOCK_REALTIME;
  if (clock_gettime(NdbTick_clk_id, &tick_time) == 0)
    return;
#endif
  fprintf(stderr, "Failed to use CLOCK_REALTIME for clock_realtime,"
          " errno=%u. Aborting\n", errno);
  fflush(stderr);
  abort();
}

/* Current time of the selected clock, in whole milliseconds. */
NDB_TICKS NdbTick_CurrentMillisecond(void)
{
  struct timespec tick_time;
  clock_gettime(NdbTick_clk_id, &tick_time);

  return
    ((NDB_TICKS)tick_time.tv_sec) * ((NDB_TICKS)MILLISEC_PER_SEC) +
    ((NDB_TICKS)tick_time.tv_nsec) / ((NDB_TICKS)NANOSEC_PER_MILLISEC);
}

/*
 * Current time split into seconds (*secs) and microseconds (*micros).
 * Returns clock_gettime()'s result: 0 on success, -1 on failure.
 */
int
NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){
  struct timespec t;
  int res = clock_gettime(NdbTick_clk_id, &t);
  * secs = t.tv_sec;
  * micros = t.tv_nsec / 1000;
  return res;
}

/* Current time of the selected clock, in nanoseconds. */
NDB_TICKS NdbTick_CurrentNanosecond(void)
{
  struct timespec tick_time;
  clock_gettime(NdbTick_clk_id, &tick_time);

  return
    (((NDB_TICKS)tick_time.tv_sec) * ((NDB_TICKS)NANOSEC_PER_SEC)) +
    ((NDB_TICKS)tick_time.tv_nsec);
}

#else
/* No clock_gettime(): gettimeofday()/Win32 fallbacks below */

/* Nothing to initialize in the fallback implementation. */
void NdbTick_Init(int need_monotonic)
{
}

/* Current wall-clock time in whole milliseconds. */
NDB_TICKS NdbTick_CurrentMillisecond(void)
{
#ifdef _WIN32
  NDB_TICKS secs;
  Uint32 micros;
  NdbTick_CurrentMicrosecond(&secs, &micros);
  return secs*1000 + micros/1000;
#else
  struct timeval tick_time;
  gettimeofday(&tick_time, 0);

  return
    ((NDB_TICKS)tick_time.tv_sec) * ((NDB_TICKS)MILLISEC_PER_SEC) +
    ((NDB_TICKS)tick_time.tv_usec) / ((NDB_TICKS)MICROSEC_PER_MILLISEC);
#endif
}

/*
 * Current wall time split into seconds and microseconds.
 * Special case: if secs is NULL, *micros receives the TOTAL microsecond
 * count (secs*1e6 + usec) instead of just the fractional part.
 * Returns 0 on success (always 0 on Windows).
 */
int
NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros)
{
#ifdef _WIN32
  /* FILETIME is in 100 ns units since 1601; /10 converts to microseconds */
  ulonglong time, timemicro, micropart, secpart;

  GetSystemTimeAsFileTime((FILETIME*)&time);
  timemicro = time/10;
  secpart = timemicro/1000000;
  micropart = timemicro%1000000;
  assert(micropart <= ULONG_MAX);
  assert(secpart*1000000+micropart == timemicro);
  *micros = (Uint32)micropart;
  *secs = secpart;
  return 0;
#else
  struct timeval tick_time;
  int res = gettimeofday(&tick_time, 0);

  if(secs==0) {
    /* NULL secs: pack everything into *micros (may wrap in 32 bits) */
    NDB_TICKS local_secs = tick_time.tv_sec;
    *micros = tick_time.tv_usec;
    *micros = local_secs*1000000+*micros;
  } else {
    * secs = tick_time.tv_sec;
    * micros = tick_time.tv_usec;
  }
  return res;
#endif
}

/* Current wall-clock time in nanoseconds (microsecond resolution). */
NDB_TICKS NdbTick_CurrentNanosecond(void)
{
#ifdef _WIN32
  NDB_TICKS secs;
  Uint32 micros;
  NdbTick_CurrentMicrosecond(&secs, &micros);
  return secs*NANOSEC_PER_SEC + micros*NANOSEC_PER_MICROSEC;
#else
  struct timeval tick_time;
  gettimeofday(&tick_time, 0);

  return
    (((NDB_TICKS)tick_time.tv_sec) * ((NDB_TICKS)NANOSEC_PER_SEC)) +
    (((NDB_TICKS)tick_time.tv_usec) * ((NDB_TICKS)NANOSEC_PER_MICROSEC));
#endif
}
#endif

/*
 * Snapshot the current time into input_timer (seconds + microseconds).
 * Returns the underlying NdbTick_CurrentMicrosecond() status.
 */
int
NdbTick_getMicroTimer(struct MicroSecondTimer* input_timer)
{
  NDB_TICKS secs;
  Uint32 mics;
  int ret_value;
  ret_value = NdbTick_CurrentMicrosecond(&secs, &mics);
  input_timer->seconds = secs;
  input_timer->micro_seconds = (NDB_TICKS)mics;
  return ret_value;
}

/*
 * Microseconds elapsed between two timer snapshots.
 * Returns 0 if stop is earlier than start (clock moved backwards).
 */
NDB_TICKS
NdbTick_getMicrosPassed(struct MicroSecondTimer start,
                        struct MicroSecondTimer stop)
{
  NDB_TICKS ret_value = (NDB_TICKS)0;
  if (start.seconds < stop.seconds) {
    NDB_TICKS sec_passed = stop.seconds - start.seconds;
    ret_value = ((NDB_TICKS)MICROSEC_PER_SEC) * sec_passed;
  } else if (start.seconds > stop.seconds) {
    /* stop precedes start: report zero elapsed time */
    return ret_value;
  }
  if (start.micro_seconds < stop.micro_seconds) {
    ret_value += (stop.micro_seconds - start.micro_seconds);
  } else if (ret_value != (NDB_TICKS)0) {
    /* borrow from the whole-second part */
    ret_value -= (start.micro_seconds - stop.micro_seconds);
  }
  return ret_value;
}
gpl-2.0
NEKTech-Labs/wrapfs-kernel-linux-3.17
drivers/staging/rtl8821ae/cam.c
89
9832
/******************************************************************************
 *
 * Copyright(c) 2009-2010  Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * wlanfae <wlanfae@realtek.com>
 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
 * Hsinchu 300, Taiwan.
 *
 * Larry Finger <Larry.Finger@lwfinger.net>
 *
 *****************************************************************************/

#include "wifi.h"
#include "cam.h"
#include <linux/export.h>

/* Clear all cached security state (key buffers, algorithms, pointers). */
void rtl_cam_reset_sec_info(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->sec.use_defaultkey = false;
	rtlpriv->sec.pairwise_enc_algorithm = NO_ENCRYPTION;
	rtlpriv->sec.group_enc_algorithm = NO_ENCRYPTION;
	memset(rtlpriv->sec.key_buf, 0, KEY_BUF_SIZE * MAX_KEY_LEN);
	memset(rtlpriv->sec.key_len, 0, KEY_BUF_SIZE);
	rtlpriv->sec.pairwise_key = NULL;
}

/*
 * Write one CAM entry (MAC address, config word and 128-bit key) into
 * hardware, one 32-bit content/command pair per CAM_CONTENT_COUNT slot.
 * BIT(31) triggers the CAM access, BIT(16) selects a write.
 */
static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
				  u8 *mac_addr, u8 *key_cont_128,
				  u16 us_config)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	u32 target_command;
	u32 target_content = 0;
	u8 entry_i;

	RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :",
		      key_cont_128, 16);

	for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
		target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
		target_command = target_command | BIT(31) | BIT(16);

		if (entry_i == 0) {
			/* slot 0: config word + first two MAC bytes */
			target_content = (u32) (*(mac_addr + 0)) << 16 |
			    (u32) (*(mac_addr + 1)) << 24 | (u32) us_config;

			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
					target_content);
			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
					target_command);

			RT_TRACE(COMP_SEC, DBG_LOUD, ("WRITE %x: %x \n",
				 rtlpriv->cfg->maps[WCAMI], target_content));
			RT_TRACE(COMP_SEC, DBG_LOUD, ("The Key ID is %d\n",
				 entry_no));
			RT_TRACE(COMP_SEC, DBG_LOUD, ("WRITE %x: %x \n",
				 rtlpriv->cfg->maps[RWCAM], target_command));
		} else if (entry_i == 1) {
			/* slot 1: remaining four MAC bytes */
			target_content = (u32) (*(mac_addr + 5)) << 24 |
			    (u32) (*(mac_addr + 4)) << 16 |
			    (u32) (*(mac_addr + 3)) << 8 |
			    (u32) (*(mac_addr + 2));

			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
					target_content);
			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
					target_command);

			RT_TRACE(COMP_SEC, DBG_LOUD,
				 ("WRITE A4: %x \n", target_content));
			RT_TRACE(COMP_SEC, DBG_LOUD,
				 ("WRITE A0: %x \n", target_command));
		} else {
			/* slots 2..7: key material, 4 bytes per slot,
			 * little-endian within each dword */
			target_content =
			    (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 3))
			    << 24 |
			    (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 2))
			    << 16 |
			    (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 1))
			    << 8 |
			    (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 0));

			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
					target_content);
			rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
					target_command);
			udelay(100);	/* let the CAM write settle */

			RT_TRACE(COMP_SEC, DBG_LOUD,
				 ("WRITE A4: %x \n", target_content));
			RT_TRACE(COMP_SEC, DBG_LOUD,
				 ("WRITE A0: %x \n", target_command));
		}
	}

	RT_TRACE(COMP_SEC, DBG_LOUD,
		 ("after set key, usconfig:%x\n", us_config));
}

/*
 * Add one key entry to the hardware CAM.
 * Returns 1 on success, 0 if ul_key_id is out of range.
 */
u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
			 u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
			 u32 ul_default_key, u8 *key_content)
{
	u32 us_config;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	RT_TRACE(COMP_SEC, DBG_DMESG,
		 ("EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, "
		  "ulUseDK=%x MacAddr %pM\n",
		  ul_entry_idx, ul_key_id, ul_enc_alg,
		  ul_default_key, mac_addr));

	if (ul_key_id == TOTAL_CAM_ENTRY) {
		RT_TRACE(COMP_ERR, DBG_WARNING, ("ulKeyId exceed!\n"));
		return 0;
	}

	/* Default keys do not carry the key index in the config word */
	if (ul_default_key == 1) {
		us_config = CFG_VALID | ((u16) (ul_enc_alg) << 2);
	} else {
		us_config = CFG_VALID | ((ul_enc_alg) << 2) | ul_key_id;
	}

	rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
			      (u8 *) key_content, us_config);

	RT_TRACE(COMP_SEC, DBG_DMESG, ("end \n"));

	return 1;
}

/* Invalidate one CAM entry by writing zero content to its first slot. */
int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
			     u8 *mac_addr, u32 ul_key_id)
{
	u32 ul_command;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	RT_TRACE(COMP_SEC, DBG_DMESG, ("key_idx:%d\n", ul_key_id));

	ul_command = ul_key_id * CAM_CONTENT_COUNT;
	ul_command = ul_command | BIT(31) | BIT(16);

	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);

	RT_TRACE(COMP_SEC, DBG_DMESG,
		 ("rtl_cam_delete_one_entry(): WRITE A4: %x \n", 0));
	RT_TRACE(COMP_SEC, DBG_DMESG,
		 ("rtl_cam_delete_one_entry(): WRITE A0: %x \n", ul_command));

	return 0;
}

/* Issue the hardware "clear all CAM entries" command (BIT30 = clear). */
void rtl_cam_reset_all_entry(struct ieee80211_hw *hw)
{
	u32 ul_command;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	ul_command = BIT(31) | BIT(30);
	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
}

/*
 * Mark CAM entry uc_index invalid (BIT(15) clear in the config word)
 * while keeping the configured cipher selection.
 */
void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	u32 ul_command;
	u32 ul_content;
	u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];

	switch (rtlpriv->sec.pairwise_enc_algorithm) {
	case WEP40_ENCRYPTION:
		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
		break;
	case WEP104_ENCRYPTION:
		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
		break;
	case TKIP_ENCRYPTION:
		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
		break;
	case AESCCMP_ENCRYPTION:
		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
		break;
	default:
		ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
	}

	ul_content = (uc_index & 3) | ((u16) (ul_enc_algo) << 2);

	ul_content |= BIT(15);
	ul_command = CAM_CONTENT_COUNT * uc_index;
	ul_command = ul_command | BIT(31) | BIT(16);

	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);

	RT_TRACE(COMP_SEC, DBG_DMESG,
		 ("rtl_cam_mark_invalid(): WRITE A4: %x \n", ul_content));
	RT_TRACE(COMP_SEC, DBG_DMESG,
		 ("rtl_cam_mark_invalid(): WRITE A0: %x \n", ul_command));
}

/* Zero every content slot of CAM entry uc_index (slot 0 keeps cipher). */
void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	u32 ul_command;
	u32 ul_content;
	u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
	u8 entry_i;

	switch (rtlpriv->sec.pairwise_enc_algorithm) {
	case WEP40_ENCRYPTION:
		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
		break;
	case WEP104_ENCRYPTION:
		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
		break;
	case TKIP_ENCRYPTION:
		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
		break;
	case AESCCMP_ENCRYPTION:
		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
		break;
	default:
		ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
	}

	for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
		if (entry_i == 0) {
			ul_content =
			    (uc_index & 0x03) | ((u16) (ul_encalgo) << 2);
			ul_content |= BIT(15);
		} else {
			ul_content = 0;
		}

		ul_command = CAM_CONTENT_COUNT * uc_index + entry_i;
		ul_command = ul_command | BIT(31) | BIT(16);

		rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
				ul_content);
		rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
				ul_command);

		RT_TRACE(COMP_SEC, DBG_LOUD,
			 ("rtl_cam_empty_entry(): WRITE A4: %x \n",
			  ul_content));
		RT_TRACE(COMP_SEC, DBG_LOUD,
			 ("rtl_cam_empty_entry(): WRITE A0: %x \n",
			  ul_command));
	}
}

/*
 * Find the CAM entry holding sta_addr, or claim the first free entry
 * (entries 0-3 are reserved).  Returns the entry index, or
 * TOTAL_CAM_ENTRY when sta_addr is NULL or the CAM is full.
 */
u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> 4;
	u8 entry_idx = 0;
	u8 i, *addr;

	if (NULL == sta_addr) {
		RT_TRACE(COMP_SEC, DBG_EMERG, ("sta_addr is NULL.\n"));
		return TOTAL_CAM_ENTRY;
	}
	/* Does STA already exist? */
	for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
		addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
		if (memcmp(addr, sta_addr, ETH_ALEN) == 0)
			return i;
	}
	/* Get a free CAM entry. */
	for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) {
		if ((bitmap & BIT(0)) == 0) {
			RT_TRACE(COMP_SEC, DBG_EMERG,
				 ("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
				  rtlpriv->sec.hwsec_cam_bitmap, entry_idx));
			rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx;
			memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx],
			       sta_addr, ETH_ALEN);
			return entry_idx;
		}
		bitmap = bitmap >> 1;
	}
	return TOTAL_CAM_ENTRY;
}

/*
 * Release the CAM entry (or entries) associated with sta_addr: clear the
 * cached address and its bit in hwsec_cam_bitmap.  No-op for NULL or
 * all-zero addresses.
 */
void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 bitmap;
	u8 i, *addr;

	if (NULL == sta_addr) {
		RT_TRACE(COMP_SEC, DBG_EMERG, ("sta_addr is NULL.\n"));
		/* BUGFIX: original fell through and dereferenced
		 * sta_addr[0] below, crashing on a NULL pointer. */
		return;
	}

	if ((sta_addr[0]|sta_addr[1]|sta_addr[2]|sta_addr[3]|\
	     sta_addr[4]|sta_addr[5]) == 0) {
		RT_TRACE(COMP_SEC, DBG_EMERG,
			 ("sta_addr is 00:00:00:00:00:00.\n"));
		return;
	}
	/* Does STA already exist? */
	for (i = 4; i < TOTAL_CAM_ENTRY; i++) {
		addr = rtlpriv->sec.hwsec_cam_sta_addr[i];
		bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i;
		if (((bitmap & BIT(0)) == BIT(0)) &&
		    (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
			/* Remove from HW Security CAM */
			memset(rtlpriv->sec.hwsec_cam_sta_addr[i],
			       0, ETH_ALEN);
			rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
			printk("&&&&&&&&&del entry %d\n",i);
		}
	}
	return;
}
gpl-2.0
mayqueenEMBEDDED/mq-kernel
drivers/input/touchscreen/colibri-vf50-ts.c
345
9751
/*
 * Toradex Colibri VF50 Touchscreen driver
 *
 * Copyright 2015 Toradex AG
 *
 * Originally authored by Stefan Agner for 3.0 kernel
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#define DRIVER_NAME			"colibri-vf50-ts"
#define DRV_VERSION			"1.0"

/* Full scale of the 12-bit ADC */
#define VF_ADC_MAX			((1 << 12) - 1)

#define COLI_TOUCH_MIN_DELAY_US		1000
#define COLI_TOUCH_MAX_DELAY_US		2000
#define COLI_PULLUP_MIN_DELAY_US	10000
#define COLI_PULLUP_MAX_DELAY_US	11000
#define COLI_TOUCH_NO_OF_AVGS		5
#define COLI_TOUCH_REQ_ADC_CHAN		4

/* Per-device state; gpio_x*/gpio_y* drive the 4-wire resistive plates. */
struct vf50_touch_device {
	struct platform_device *pdev;
	struct input_dev *ts_input;
	struct iio_channel *channels;
	struct gpio_desc *gpio_xp;
	struct gpio_desc *gpio_xm;
	struct gpio_desc *gpio_yp;
	struct gpio_desc *gpio_ym;
	int pen_irq;
	int min_pressure;		/* from "vf50-ts-min-pressure" DT prop */
	bool stop_touchscreen;		/* set by close() to stop the IRQ loop */
};

/*
 * Enables given plates and measures touch parameters using ADC.
 * Returns the averaged raw ADC value, or a negative errno on read failure.
 */
static int adc_ts_measure(struct iio_channel *channel,
			  struct gpio_desc *plate_p, struct gpio_desc *plate_m)
{
	int i, value = 0, val = 0;
	int error;

	gpiod_set_value(plate_p, 1);
	gpiod_set_value(plate_m, 1);

	/* let the plates settle before sampling */
	usleep_range(COLI_TOUCH_MIN_DELAY_US, COLI_TOUCH_MAX_DELAY_US);

	for (i = 0; i < COLI_TOUCH_NO_OF_AVGS; i++) {
		error = iio_read_channel_raw(channel, &val);
		if (error < 0) {
			value = error;
			goto error_iio_read;
		}

		value += val;
	}

	value /= COLI_TOUCH_NO_OF_AVGS;

error_iio_read:
	/* always de-energize the plates, even on error */
	gpiod_set_value(plate_p, 0);
	gpiod_set_value(plate_m, 0);

	return value;
}

/*
 * Enable touch detection using falling edge detection on XM
 */
static void vf50_ts_enable_touch_detection(struct vf50_touch_device *vf50_ts)
{
	/* Enable plate YM (needs to be strong GND, high active) */
	gpiod_set_value(vf50_ts->gpio_ym, 1);

	/*
	 * Let the platform mux to idle state in order to enable
	 * Pull-Up on GPIO
	 */
	pinctrl_pm_select_idle_state(&vf50_ts->pdev->dev);

	/* Wait for the pull-up to be stable on high */
	usleep_range(COLI_PULLUP_MIN_DELAY_US, COLI_PULLUP_MAX_DELAY_US);
}

/*
 * ADC touch screen sampling bottom half irq handler.
 * Samples X, Y and two pressure channels in a loop until the pen is
 * lifted (pressure too low), an ADC read fails, or close() requests stop;
 * then reports pen-up and re-arms touch detection.
 */
static irqreturn_t vf50_ts_irq_bh(int irq, void *private)
{
	struct vf50_touch_device *vf50_ts = private;
	struct device *dev = &vf50_ts->pdev->dev;
	int val_x, val_y, val_z1, val_z2, val_p = 0;
	bool discard_val_on_start = true;

	/* Disable the touch detection plates */
	gpiod_set_value(vf50_ts->gpio_ym, 0);

	/* Let the platform mux to default state in order to mux as ADC */
	pinctrl_pm_select_default_state(dev);

	while (!vf50_ts->stop_touchscreen) {
		/* X-Direction */
		val_x = adc_ts_measure(&vf50_ts->channels[0],
				vf50_ts->gpio_xp, vf50_ts->gpio_xm);
		if (val_x < 0)
			break;

		/* Y-Direction */
		val_y = adc_ts_measure(&vf50_ts->channels[1],
				vf50_ts->gpio_yp, vf50_ts->gpio_ym);
		if (val_y < 0)
			break;

		/*
		 * Touch pressure
		 * Measure on XP/YM
		 */
		val_z1 = adc_ts_measure(&vf50_ts->channels[2],
				vf50_ts->gpio_yp, vf50_ts->gpio_xm);
		if (val_z1 < 0)
			break;
		val_z2 = adc_ts_measure(&vf50_ts->channels[3],
				vf50_ts->gpio_yp, vf50_ts->gpio_xm);
		if (val_z2 < 0)
			break;

		/* Validate signal (avoid calculation using noise) */
		if (val_z1 > 64 && val_x > 64) {
			/*
			 * Calculate resistance between the plates
			 * lower resistance means higher pressure
			 */
			int r_x = (1000 * val_x) / VF_ADC_MAX;

			val_p = (r_x * val_z2) / val_z1 - r_x;

		} else {
			val_p = 2000;
		}

		val_p = 2000 - val_p;	/* invert: higher value = more pressure */
		dev_dbg(dev,
			"Measured values: x: %d, y: %d, z1: %d, z2: %d, p: %d\n",
			val_x, val_y, val_z1, val_z2, val_p);

		/*
		 * If touch pressure is too low, stop measuring and reenable
		 * touch detection
		 */
		if (val_p < vf50_ts->min_pressure || val_p > 2000)
			break;

		/*
		 * The pressure may not be enough for the first x and the
		 * second y measurement, but, the pressure is ok when the
		 * driver is doing the third and fourth measurement. To
		 * take care of this, we drop the first measurement always.
		 */
		if (discard_val_on_start) {
			discard_val_on_start = false;
		} else {
			/*
			 * Report touch position and sleep for
			 * the next measurement.
			 */
			input_report_abs(vf50_ts->ts_input,
					ABS_X, VF_ADC_MAX - val_x);
			input_report_abs(vf50_ts->ts_input,
					ABS_Y, VF_ADC_MAX - val_y);
			input_report_abs(vf50_ts->ts_input,
					ABS_PRESSURE, val_p);
			input_report_key(vf50_ts->ts_input, BTN_TOUCH, 1);
			input_sync(vf50_ts->ts_input);
		}

		usleep_range(COLI_PULLUP_MIN_DELAY_US,
			     COLI_PULLUP_MAX_DELAY_US);
	}

	/* Report no more touch, re-enable touch detection */
	input_report_abs(vf50_ts->ts_input, ABS_PRESSURE, 0);
	input_report_key(vf50_ts->ts_input, BTN_TOUCH, 0);
	input_sync(vf50_ts->ts_input);

	vf50_ts_enable_touch_detection(vf50_ts);

	return IRQ_HANDLED;
}

/* input_dev open(): clear the stop flag and arm touch detection. */
static int vf50_ts_open(struct input_dev *dev_input)
{
	struct vf50_touch_device *touchdev = input_get_drvdata(dev_input);
	struct device *dev = &touchdev->pdev->dev;

	dev_dbg(dev, "Input device %s opened, starting touch detection\n",
		dev_input->name);

	touchdev->stop_touchscreen = false;

	/* Mux detection before request IRQ, wait for pull-up to settle */
	vf50_ts_enable_touch_detection(touchdev);

	return 0;
}

/* input_dev close(): stop the sampling loop and disable detection. */
static void vf50_ts_close(struct input_dev *dev_input)
{
	struct vf50_touch_device *touchdev = input_get_drvdata(dev_input);
	struct device *dev = &touchdev->pdev->dev;

	touchdev->stop_touchscreen = true;

	/* Make sure IRQ is not running past close */
	mb();
	synchronize_irq(touchdev->pen_irq);

	gpiod_set_value(touchdev->gpio_ym, 0);
	pinctrl_pm_select_default_state(dev);

	dev_dbg(dev, "Input device %s closed, disable touch detection\n",
		dev_input->name);
}

/* Helper: devm-acquire one named GPIO, logging an error on failure. */
static int vf50_ts_get_gpiod(struct device *dev, struct gpio_desc **gpio_d,
			     const char *con_id, enum gpiod_flags flags)
{
	int error;

	*gpio_d = devm_gpiod_get(dev, con_id, flags);
	if (IS_ERR(*gpio_d)) {
		error = PTR_ERR(*gpio_d);
		dev_err(dev, "Could not get gpio_%s %d\n", con_id, error);
		return error;
	}

	return 0;
}

/* devm action: release all IIO channels acquired in probe(). */
static void vf50_ts_channel_release(void *data)
{
	struct iio_channel *channels = data;

	iio_channel_release_all(channels);
}

/*
 * Probe: acquire the four ADC channels, read the minimum-pressure DT
 * property, register the input device, claim the plate GPIOs, and
 * install the threaded pen-down IRQ handler.  All resources are devm
 * managed, so error paths simply return.
 */
static int vf50_ts_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	struct iio_channel *channels;
	struct device *dev = &pdev->dev;
	struct vf50_touch_device *touchdev;
	int num_adc_channels;
	int error;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return PTR_ERR(channels);

	error = devm_add_action(dev, vf50_ts_channel_release, channels);
	if (error) {
		iio_channel_release_all(channels);
		dev_err(dev, "Failed to register iio channel release action");
		return error;
	}

	/* the channel array is NULL-terminated via .indio_dev */
	num_adc_channels = 0;
	while (channels[num_adc_channels].indio_dev)
		num_adc_channels++;

	if (num_adc_channels != COLI_TOUCH_REQ_ADC_CHAN) {
		dev_err(dev, "Inadequate ADC channels specified\n");
		return -EINVAL;
	}

	touchdev = devm_kzalloc(dev, sizeof(*touchdev), GFP_KERNEL);
	if (!touchdev)
		return -ENOMEM;

	touchdev->pdev = pdev;
	touchdev->channels = channels;

	error = of_property_read_u32(dev->of_node, "vf50-ts-min-pressure",
				 &touchdev->min_pressure);
	if (error)
		return error;

	input = devm_input_allocate_device(dev);
	if (!input) {
		dev_err(dev, "Failed to allocate TS input device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, touchdev);

	input->name = DRIVER_NAME;
	input->id.bustype = BUS_HOST;
	input->dev.parent = dev;
	input->open = vf50_ts_open;
	input->close = vf50_ts_close;

	input_set_capability(input, EV_KEY, BTN_TOUCH);
	input_set_abs_params(input, ABS_X, 0, VF_ADC_MAX, 0, 0);
	input_set_abs_params(input, ABS_Y, 0, VF_ADC_MAX, 0, 0);
	input_set_abs_params(input, ABS_PRESSURE, 0, VF_ADC_MAX, 0, 0);

	touchdev->ts_input = input;
	input_set_drvdata(input, touchdev);

	error = input_register_device(input);
	if (error) {
		dev_err(dev, "Failed to register input device\n");
		return error;
	}

	error = vf50_ts_get_gpiod(dev, &touchdev->gpio_xp, "xp", GPIOD_OUT_LOW);
	if (error)
		return error;

	error = vf50_ts_get_gpiod(dev, &touchdev->gpio_xm,
				"xm", GPIOD_OUT_LOW);
	if (error)
		return error;

	error = vf50_ts_get_gpiod(dev, &touchdev->gpio_yp, "yp", GPIOD_OUT_LOW);
	if (error)
		return error;

	error = vf50_ts_get_gpiod(dev, &touchdev->gpio_ym, "ym", GPIOD_OUT_LOW);
	if (error)
		return error;

	touchdev->pen_irq = platform_get_irq(pdev, 0);
	if (touchdev->pen_irq < 0)
		return touchdev->pen_irq;

	error = devm_request_threaded_irq(dev, touchdev->pen_irq,
					  NULL, vf50_ts_irq_bh, IRQF_ONESHOT,
					  "vf50 touch", touchdev);
	if (error) {
		dev_err(dev, "Failed to request IRQ %d: %d\n",
			touchdev->pen_irq, error);
		return error;
	}

	return 0;
}

static const struct of_device_id vf50_touch_of_match[] = {
	{ .compatible = "toradex,vf50-touchscreen", },
	{ }
};
MODULE_DEVICE_TABLE(of, vf50_touch_of_match);

static struct platform_driver vf50_touch_driver = {
	.driver = {
		.name = "toradex,vf50_touchctrl",
		.of_match_table = vf50_touch_of_match,
	},
	.probe = vf50_ts_probe,
};
module_platform_driver(vf50_touch_driver);

MODULE_AUTHOR("Sanchayan Maity");
MODULE_DESCRIPTION("Colibri VF50 Touchscreen driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
gpl-2.0
schlund/nerdey-kernel
drivers/hwmon/adm1026.c
601
60096
/* adm1026.c - Part of lm_sensors, Linux kernel modules for hardware monitoring Copyright (C) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com> Copyright (C) 2004 Justin Thiessen <jthiessen@penguincomputing.com> Chip details at: <http://www.analog.com/UploadedFiles/Data_Sheets/779263102ADM1026_a.pdf> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/mutex.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; /* Insmod parameters */ I2C_CLIENT_INSMOD_1(adm1026); static int gpio_input[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int gpio_output[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int gpio_inverted[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int gpio_normal[17] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; static int gpio_fan[8] = { -1, -1, -1, -1, -1, -1, -1, -1 }; module_param_array(gpio_input, int, NULL, 0); MODULE_PARM_DESC(gpio_input, "List of GPIO pins (0-16) to program as inputs"); 
module_param_array(gpio_output, int, NULL, 0); MODULE_PARM_DESC(gpio_output, "List of GPIO pins (0-16) to program as " "outputs"); module_param_array(gpio_inverted, int, NULL, 0); MODULE_PARM_DESC(gpio_inverted, "List of GPIO pins (0-16) to program as " "inverted"); module_param_array(gpio_normal, int, NULL, 0); MODULE_PARM_DESC(gpio_normal, "List of GPIO pins (0-16) to program as " "normal/non-inverted"); module_param_array(gpio_fan, int, NULL, 0); MODULE_PARM_DESC(gpio_fan, "List of GPIO pins (0-7) to program as fan tachs"); /* Many ADM1026 constants specified below */ /* The ADM1026 registers */ #define ADM1026_REG_CONFIG1 0x00 #define CFG1_MONITOR 0x01 #define CFG1_INT_ENABLE 0x02 #define CFG1_INT_CLEAR 0x04 #define CFG1_AIN8_9 0x08 #define CFG1_THERM_HOT 0x10 #define CFG1_DAC_AFC 0x20 #define CFG1_PWM_AFC 0x40 #define CFG1_RESET 0x80 #define ADM1026_REG_CONFIG2 0x01 /* CONFIG2 controls FAN0/GPIO0 through FAN7/GPIO7 */ #define ADM1026_REG_CONFIG3 0x07 #define CFG3_GPIO16_ENABLE 0x01 #define CFG3_CI_CLEAR 0x02 #define CFG3_VREF_250 0x04 #define CFG3_GPIO16_DIR 0x40 #define CFG3_GPIO16_POL 0x80 #define ADM1026_REG_E2CONFIG 0x13 #define E2CFG_READ 0x01 #define E2CFG_WRITE 0x02 #define E2CFG_ERASE 0x04 #define E2CFG_ROM 0x08 #define E2CFG_CLK_EXT 0x80 /* There are 10 general analog inputs and 7 dedicated inputs * They are: * 0 - 9 = AIN0 - AIN9 * 10 = Vbat * 11 = 3.3V Standby * 12 = 3.3V Main * 13 = +5V * 14 = Vccp (CPU core voltage) * 15 = +12V * 16 = -12V */ static u16 ADM1026_REG_IN[] = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x27, 0x29, 0x26, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f }; static u16 ADM1026_REG_IN_MIN[] = { 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x6d, 0x49, 0x6b, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f }; static u16 ADM1026_REG_IN_MAX[] = { 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x6c, 0x41, 0x6a, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47 }; /* Temperatures are: * 0 - Internal * 1 - External 1 * 2 - External 2 */ static u16 
ADM1026_REG_TEMP[] = { 0x1f, 0x28, 0x29 }; static u16 ADM1026_REG_TEMP_MIN[] = { 0x69, 0x48, 0x49 }; static u16 ADM1026_REG_TEMP_MAX[] = { 0x68, 0x40, 0x41 }; static u16 ADM1026_REG_TEMP_TMIN[] = { 0x10, 0x11, 0x12 }; static u16 ADM1026_REG_TEMP_THERM[] = { 0x0d, 0x0e, 0x0f }; static u16 ADM1026_REG_TEMP_OFFSET[] = { 0x1e, 0x6e, 0x6f }; #define ADM1026_REG_FAN(nr) (0x38 + (nr)) #define ADM1026_REG_FAN_MIN(nr) (0x60 + (nr)) #define ADM1026_REG_FAN_DIV_0_3 0x02 #define ADM1026_REG_FAN_DIV_4_7 0x03 #define ADM1026_REG_DAC 0x04 #define ADM1026_REG_PWM 0x05 #define ADM1026_REG_GPIO_CFG_0_3 0x08 #define ADM1026_REG_GPIO_CFG_4_7 0x09 #define ADM1026_REG_GPIO_CFG_8_11 0x0a #define ADM1026_REG_GPIO_CFG_12_15 0x0b /* CFG_16 in REG_CFG3 */ #define ADM1026_REG_GPIO_STATUS_0_7 0x24 #define ADM1026_REG_GPIO_STATUS_8_15 0x25 /* STATUS_16 in REG_STATUS4 */ #define ADM1026_REG_GPIO_MASK_0_7 0x1c #define ADM1026_REG_GPIO_MASK_8_15 0x1d /* MASK_16 in REG_MASK4 */ #define ADM1026_REG_COMPANY 0x16 #define ADM1026_REG_VERSTEP 0x17 /* These are the recognized values for the above regs */ #define ADM1026_COMPANY_ANALOG_DEV 0x41 #define ADM1026_VERSTEP_GENERIC 0x40 #define ADM1026_VERSTEP_ADM1026 0x44 #define ADM1026_REG_MASK1 0x18 #define ADM1026_REG_MASK2 0x19 #define ADM1026_REG_MASK3 0x1a #define ADM1026_REG_MASK4 0x1b #define ADM1026_REG_STATUS1 0x20 #define ADM1026_REG_STATUS2 0x21 #define ADM1026_REG_STATUS3 0x22 #define ADM1026_REG_STATUS4 0x23 #define ADM1026_FAN_ACTIVATION_TEMP_HYST -6 #define ADM1026_FAN_CONTROL_TEMP_RANGE 20 #define ADM1026_PWM_MAX 255 /* Conversions. Rounding and limit checking is only done on the TO_REG * variants. Note that you should be a bit careful with which arguments * these macros are called: arguments may be evaluated more than once. */ /* IN are scaled acording to built-in resistors. These are the * voltages corresponding to 3/4 of full scale (192 or 0xc0) * NOTE: The -12V input needs an additional factor to account * for the Vref pullup resistor. 
* NEG12_OFFSET = SCALE * Vref / V-192 - Vref * = 13875 * 2.50 / 1.875 - 2500 * = 16000 * * The values in this table are based on Table II, page 15 of the * datasheet. */ static int adm1026_scaling[] = { /* .001 Volts */ 2250, 2250, 2250, 2250, 2250, 2250, 1875, 1875, 1875, 1875, 3000, 3330, 3330, 4995, 2250, 12000, 13875 }; #define NEG12_OFFSET 16000 #define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from)) #define INS_TO_REG(n, val) (SENSORS_LIMIT(SCALE(val, adm1026_scaling[n], 192),\ 0, 255)) #define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n])) /* FAN speed is measured using 22.5kHz clock and counts for 2 pulses * and we assume a 2 pulse-per-rev fan tach signal * 22500 kHz * 60 (sec/min) * 2 (pulse) / 2 (pulse/rev) == 1350000 */ #define FAN_TO_REG(val, div) ((val) <= 0 ? 0xff : \ SENSORS_LIMIT(1350000/((val)*(div)), 1, 254)) #define FAN_FROM_REG(val, div) ((val) == 0 ? -1:(val) == 0xff ? 0 : \ 1350000/((val)*(div))) #define DIV_FROM_REG(val) (1<<(val)) #define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0) /* Temperature is reported in 1 degC increments */ #define TEMP_TO_REG(val) (SENSORS_LIMIT(((val)+((val)<0 ? -500 : 500))/1000,\ -127, 127)) #define TEMP_FROM_REG(val) ((val) * 1000) #define OFFSET_TO_REG(val) (SENSORS_LIMIT(((val)+((val)<0 ? -500 : 500))/1000,\ -127, 127)) #define OFFSET_FROM_REG(val) ((val) * 1000) #define PWM_TO_REG(val) (SENSORS_LIMIT(val, 0, 255)) #define PWM_FROM_REG(val) (val) #define PWM_MIN_TO_REG(val) ((val) & 0xf0) #define PWM_MIN_FROM_REG(val) (((val) & 0xf0) + ((val) >> 4)) /* Analog output is a voltage, and scaled to millivolts. The datasheet * indicates that the DAC could be used to drive the fans, but in our * example board (Arima HDAMA) it isn't connected to the fans at all. 
 */
/* DAC output: convert between millivolts (0..2500) and the 8-bit register. */
#define DAC_TO_REG(val) (SENSORS_LIMIT(((((val)*255)+500)/2500), 0, 255))
#define DAC_FROM_REG(val) (((val)*2500)/255)

/* Chip sampling rates
 *
 * Some sensors are not updated more frequently than once per second
 * so it doesn't make sense to read them more often than that.
 * We cache the results and return the saved data if the driver
 * is called again before a second has elapsed.
 *
 * Also, there is significant configuration data for this chip
 * So, we keep the config data up to date in the cache
 * when it is written and only sample it once every 5 *minutes*
 */
#define ADM1026_DATA_INTERVAL		(1 * HZ)
#define ADM1026_CONFIG_INTERVAL		(5 * 60 * HZ)

/* We allow for multiple chips in a single system.
 *
 * For each registered ADM1026, we need to keep state information
 * at client->data. The adm1026_data structure is dynamically
 * allocated, when a new client structure is allocated. */

/* Cached state for the PWM output (pwm1..pwm3 sysfs files share it). */
struct pwm_data {
	u8 pwm;			/* current duty-cycle register value */
	u8 enable;		/* 0=off, 1=manual, 2=automatic fan control */
	u8 auto_pwm_min;	/* minimum duty cycle in automatic mode */
};

/* Per-client device state; guarded by update_lock. */
struct adm1026_data {
	struct device *hwmon_dev;

	struct mutex update_lock;
	int valid;		/* !=0 if following fields are valid */
	unsigned long last_reading;	/* In jiffies */
	unsigned long last_config;	/* In jiffies */

	u8 in[17];		/* Register value */
	u8 in_max[17];		/* Register value */
	u8 in_min[17];		/* Register value */
	s8 temp[3];		/* Register value */
	s8 temp_min[3];		/* Register value */
	s8 temp_max[3];		/* Register value */
	s8 temp_tmin[3];	/* Register value */
	s8 temp_crit[3];	/* Register value */
	s8 temp_offset[3];	/* Register value */
	u8 fan[8];		/* Register value */
	u8 fan_min[8];		/* Register value */
	u8 fan_div[8];		/* Decoded value */
	struct pwm_data pwm1;	/* Pwm control values */
	u8 vrm;			/* VRM version */
	u8 analog_out;		/* Register value (DAC) */
	long alarms;		/* Register encoding, combined */
	long alarm_mask;	/* Register encoding, combined */
	long gpio;		/* Register encoding, combined */
	long gpio_mask;		/* Register encoding, combined */
	u8 gpio_config[17];	/* Decoded value */
	u8 config1;		/* Register value */
	u8 config2;		/* Register value */
	u8 config3;		/* Register value */
};

static int adm1026_probe(struct i2c_client *client,
			 const struct i2c_device_id *id);
static int adm1026_detect(struct i2c_client *client, int kind,
			  struct i2c_board_info *info);
static int adm1026_remove(struct i2c_client *client);
static int adm1026_read_value(struct i2c_client *client, u8 reg);
static int adm1026_write_value(struct i2c_client *client, u8 reg, int value);
static void adm1026_print_gpio(struct i2c_client *client);
static void adm1026_fixup_gpio(struct i2c_client *client);
static struct adm1026_data *adm1026_update_device(struct device *dev);
static void adm1026_init_client(struct i2c_client *client);

static const struct i2c_device_id adm1026_id[] = {
	{ "adm1026", adm1026 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, adm1026_id);

static struct i2c_driver adm1026_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "adm1026",
	},
	.probe		= adm1026_probe,
	.remove		= adm1026_remove,
	.id_table	= adm1026_id,
	.detect		= adm1026_detect,
	.address_data	= &addr_data,
};

/* Read one register. Only the "RAM" register space (reg < 0x80) is
 * supported; the EEPROM range is not accessed and reads back as 0. */
static int adm1026_read_value(struct i2c_client *client, u8 reg)
{
	int res;

	if (reg < 0x80) {
		/* "RAM" locations */
		res = i2c_smbus_read_byte_data(client, reg) & 0xff;
	} else {
		/* EEPROM, do nothing */
		res = 0;
	}
	return res;
}

/* Write one register. As above, EEPROM writes are silently skipped. */
static int adm1026_write_value(struct i2c_client *client, u8 reg, int value)
{
	int res;

	if (reg < 0x80) {
		/* "RAM" locations */
		res = i2c_smbus_write_byte_data(client, reg, value);
	} else {
		/* EEPROM, do nothing */
		res = 0;
	}
	return res;
}

/* One-time chip setup: read the CONFIG registers, decode and optionally
 * reprogram the GPIO configuration from the module parameters, enable
 * monitoring, and seed fan_div[] from the hardware defaults. */
static void adm1026_init_client(struct i2c_client *client)
{
	int value, i;
	struct adm1026_data *data = i2c_get_clientdata(client);

	dev_dbg(&client->dev, "Initializing device\n");
	/* Read chip config */
	data->config1 = adm1026_read_value(client, ADM1026_REG_CONFIG1);
	data->config2 = adm1026_read_value(client, ADM1026_REG_CONFIG2);
	data->config3 = adm1026_read_value(client, ADM1026_REG_CONFIG3);

	/* Inform user of chip config */
	dev_dbg(&client->dev, "ADM1026_REG_CONFIG1 is: 0x%02x\n",
		data->config1);
	if ((data->config1 & CFG1_MONITOR) == 0) {
		dev_dbg(&client->dev, "Monitoring not currently "
			"enabled.\n");
	}
	if (data->config1 & CFG1_INT_ENABLE) {
		dev_dbg(&client->dev, "SMBALERT interrupts are "
			"enabled.\n");
	}
	if (data->config1 & CFG1_AIN8_9) {
		dev_dbg(&client->dev, "in8 and in9 enabled. "
			"temp3 disabled.\n");
	} else {
		dev_dbg(&client->dev, "temp3 enabled. in8 and "
			"in9 disabled.\n");
	}
	if (data->config1 & CFG1_THERM_HOT) {
		dev_dbg(&client->dev, "Automatic THERM, PWM, "
			"and temp limits enabled.\n");
	}

	if (data->config3 & CFG3_GPIO16_ENABLE) {
		dev_dbg(&client->dev, "GPIO16 enabled. THERM "
			"pin disabled.\n");
	} else {
		dev_dbg(&client->dev, "THERM pin enabled. "
			"GPIO16 disabled.\n");
	}
	if (data->config3 & CFG3_VREF_250) {
		dev_dbg(&client->dev, "Vref is 2.50 Volts.\n");
	} else {
		dev_dbg(&client->dev, "Vref is 1.82 Volts.\n");
	}
	/* Read and pick apart the existing GPIO configuration.
	 * Each config register packs four 2-bit GPIO descriptors;
	 * GPIO16's descriptor lives in the top two bits of CONFIG3. */
	value = 0;
	for (i = 0;i <= 15;++i) {
		if ((i & 0x03) == 0) {
			value = adm1026_read_value(client,
					ADM1026_REG_GPIO_CFG_0_3 + i/4);
		}
		data->gpio_config[i] = value & 0x03;
		value >>= 2;
	}
	data->gpio_config[16] = (data->config3 >> 6) & 0x03;

	/* ... and then print it */
	adm1026_print_gpio(client);

	/* If the user asks us to reprogram the GPIO config, then
	 * do it now.
	 */
	if (gpio_input[0] != -1 || gpio_output[0] != -1
		|| gpio_inverted[0] != -1 || gpio_normal[0] != -1
		|| gpio_fan[0] != -1) {
		adm1026_fixup_gpio(client);
	}

	/* WE INTENTIONALLY make no changes to the limits,
	 * offsets, pwms, fans and zones. If they were
	 * configured, we don't want to mess with them.
	 * If they weren't, the default is 100% PWM, no
	 * control and will suffice until 'sensors -s'
	 * can be run by the user. We DO set the default
	 * value for pwm1.auto_pwm_min to its maximum
	 * so that enabling automatic pwm fan control
	 * without first setting a value for pwm1.auto_pwm_min
	 * will not result in potentially dangerous fan speed decrease.
	 */
	data->pwm1.auto_pwm_min=255;

	/* Start monitoring */
	value = adm1026_read_value(client, ADM1026_REG_CONFIG1);
	/* Set MONITOR, clear interrupt acknowledge and s/w reset */
	value = (value | CFG1_MONITOR) & (~CFG1_INT_CLEAR & ~CFG1_RESET);
	dev_dbg(&client->dev, "Setting CONFIG to: 0x%02x\n", value);
	data->config1 = value;
	adm1026_write_value(client, ADM1026_REG_CONFIG1, value);

	/* initialize fan_div[] to hardware defaults */
	value = adm1026_read_value(client, ADM1026_REG_FAN_DIV_0_3) |
		(adm1026_read_value(client, ADM1026_REG_FAN_DIV_4_7) << 8);
	for (i = 0;i <= 7;++i) {
		data->fan_div[i] = DIV_FROM_REG(value & 0x03);
		value >>= 2;
	}
}

/* Dump the decoded GPIO configuration via dev_dbg.  GPIO0-7 double as
 * fan tach inputs (selected by config2 bits); GPIO16 doubles as THERM. */
static void adm1026_print_gpio(struct i2c_client *client)
{
	struct adm1026_data *data = i2c_get_clientdata(client);
	int i;

	dev_dbg(&client->dev, "GPIO config is:\n");
	for (i = 0;i <= 7;++i) {
		if (data->config2 & (1 << i)) {
			dev_dbg(&client->dev, "\t%sGP%s%d\n",
				data->gpio_config[i] & 0x02 ? "" : "!",
				data->gpio_config[i] & 0x01 ? "OUT" : "IN",
				i);
		} else {
			dev_dbg(&client->dev, "\tFAN%d\n", i);
		}
	}
	for (i = 8;i <= 15;++i) {
		dev_dbg(&client->dev, "\t%sGP%s%d\n",
			data->gpio_config[i] & 0x02 ? "" : "!",
			data->gpio_config[i] & 0x01 ? "OUT" : "IN",
			i);
	}
	if (data->config3 & CFG3_GPIO16_ENABLE) {
		dev_dbg(&client->dev, "\t%sGP%s16\n",
			data->gpio_config[16] & 0x02 ? "" : "!",
			data->gpio_config[16] & 0x01 ? "OUT" : "IN");
	} else {
		/* GPIO16 is THERM */
		dev_dbg(&client->dev, "\tTHERM\n");
	}
}

/* Apply the gpio_input/gpio_output/gpio_inverted/gpio_normal/gpio_fan
 * module parameters to the cached GPIO config and write it back.
 * Precedence (later overrides earlier): output, input, inverted,
 * normal, fan. */
static void adm1026_fixup_gpio(struct i2c_client *client)
{
	struct adm1026_data *data = i2c_get_clientdata(client);
	int i;
	int value;

	/* Make the changes requested. */
	/* We may need to unlock/stop monitoring or soft-reset the
	 * chip before we can make changes.  This hasn't been
	 * tested much.  FIXME
	 */

	/* Make outputs */
	for (i = 0;i <= 16;++i) {
		if (gpio_output[i] >= 0 && gpio_output[i] <= 16) {
			data->gpio_config[gpio_output[i]] |= 0x01;
		}
		/* if GPIO0-7 is output, it isn't a FAN tach */
		if (gpio_output[i] >= 0 && gpio_output[i] <= 7) {
			data->config2 |= 1 << gpio_output[i];
		}
	}

	/* Input overrides output */
	for (i = 0;i <= 16;++i) {
		if (gpio_input[i] >= 0 && gpio_input[i] <= 16) {
			data->gpio_config[gpio_input[i]] &= ~ 0x01;
		}
		/* if GPIO0-7 is input, it isn't a FAN tach */
		if (gpio_input[i] >= 0 && gpio_input[i] <= 7) {
			data->config2 |= 1 << gpio_input[i];
		}
	}

	/* Inverted */
	for (i = 0;i <= 16;++i) {
		if (gpio_inverted[i] >= 0 && gpio_inverted[i] <= 16) {
			data->gpio_config[gpio_inverted[i]] &= ~ 0x02;
		}
	}

	/* Normal overrides inverted */
	for (i = 0;i <= 16;++i) {
		if (gpio_normal[i] >= 0 && gpio_normal[i] <= 16) {
			data->gpio_config[gpio_normal[i]] |= 0x02;
		}
	}

	/* Fan overrides input and output */
	for (i = 0;i <= 7;++i) {
		if (gpio_fan[i] >= 0 && gpio_fan[i] <= 7) {
			data->config2 &= ~(1 << gpio_fan[i]);
		}
	}

	/* Write new configs to registers */
	adm1026_write_value(client, ADM1026_REG_CONFIG2, data->config2);
	data->config3 = (data->config3 & 0x3f)
			| ((data->gpio_config[16] & 0x03) << 6);
	adm1026_write_value(client, ADM1026_REG_CONFIG3, data->config3);
	for (i = 15, value = 0;i >= 0;--i) {
		value <<= 2;
		value |= data->gpio_config[i] & 0x03;
		if ((i & 0x03) == 0) {
			adm1026_write_value(client,
					ADM1026_REG_GPIO_CFG_0_3 + i/4,
					value);
			value = 0;
		}
	}

	/* Print the new config */
	adm1026_print_gpio(client);
}

/* Refresh the cached register state.  Fast-changing sensor values are
 * re-read at most once per ADM1026_DATA_INTERVAL, configuration data
 * at most once per ADM1026_CONFIG_INTERVAL. */
static struct adm1026_data *adm1026_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct adm1026_data *data = i2c_get_clientdata(client);
	int i;
	long value, alarms, gpio;

	mutex_lock(&data->update_lock);
	if (!data->valid
	    || time_after(jiffies, data->last_reading + ADM1026_DATA_INTERVAL)) {
		/* Things that change quickly */
		dev_dbg(&client->dev, "Reading sensor values\n");
		for (i = 0;i <= 16;++i) {
			data->in[i] =
			    adm1026_read_value(client, ADM1026_REG_IN[i]);
		}

		for (i = 0;i <= 7;++i) {
			data->fan[i] =
			    adm1026_read_value(client, ADM1026_REG_FAN(i));
		}

		for (i = 0;i <= 2;++i) {
			/* NOTE: temp[] is s8 and we assume 2's complement
			 *   "conversion" in the assignment */
			data->temp[i] =
			    adm1026_read_value(client, ADM1026_REG_TEMP[i]);
		}

		data->pwm1.pwm = adm1026_read_value(client,
			ADM1026_REG_PWM);
		data->analog_out = adm1026_read_value(client,
			ADM1026_REG_DAC);

		/* GPIO16 is MSbit of alarms, move it to gpio */
		alarms = adm1026_read_value(client, ADM1026_REG_STATUS4);
		gpio = alarms & 0x80 ? 0x0100 : 0; /* GPIO16 */
		alarms &= 0x7f;
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_STATUS3);
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_STATUS2);
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_STATUS1);
		data->alarms = alarms;

		/* Read the GPIO values */
		gpio |= adm1026_read_value(client,
			ADM1026_REG_GPIO_STATUS_8_15);
		gpio <<= 8;
		gpio |= adm1026_read_value(client,
			ADM1026_REG_GPIO_STATUS_0_7);
		data->gpio = gpio;

		data->last_reading = jiffies;
	};	/* last_reading */

	if (!data->valid ||
	    time_after(jiffies, data->last_config + ADM1026_CONFIG_INTERVAL)) {
		/* Things that don't change often */
		dev_dbg(&client->dev, "Reading config values\n");
		for (i = 0;i <= 16;++i) {
			data->in_min[i] = adm1026_read_value(client,
				ADM1026_REG_IN_MIN[i]);
			data->in_max[i] = adm1026_read_value(client,
				ADM1026_REG_IN_MAX[i]);
		}

		value = adm1026_read_value(client, ADM1026_REG_FAN_DIV_0_3)
			| (adm1026_read_value(client, ADM1026_REG_FAN_DIV_4_7)
			<< 8);
		for (i = 0;i <= 7;++i) {
			data->fan_min[i] = adm1026_read_value(client,
				ADM1026_REG_FAN_MIN(i));
			data->fan_div[i] = DIV_FROM_REG(value & 0x03);
			value >>= 2;
		}

		for (i = 0; i <= 2; ++i) {
			/* NOTE: temp_xxx[] are s8 and we assume 2's
			 *    complement "conversion" in the assignment
			 */
			data->temp_min[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_MIN[i]);
			data->temp_max[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_MAX[i]);
			data->temp_tmin[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_TMIN[i]);
			data->temp_crit[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_THERM[i]);
			data->temp_offset[i] = adm1026_read_value(client,
				ADM1026_REG_TEMP_OFFSET[i]);
		}

		/* Read the STATUS/alarm masks */
		alarms = adm1026_read_value(client, ADM1026_REG_MASK4);
		gpio = alarms & 0x80 ? 0x0100 : 0; /* GPIO16 */
		alarms = (alarms & 0x7f) << 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_MASK3);
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_MASK2);
		alarms <<= 8;
		alarms |= adm1026_read_value(client, ADM1026_REG_MASK1);
		data->alarm_mask = alarms;

		/* Read the GPIO values */
		gpio |= adm1026_read_value(client,
			ADM1026_REG_GPIO_MASK_8_15);
		gpio <<= 8;
		gpio |= adm1026_read_value(client, ADM1026_REG_GPIO_MASK_0_7);
		data->gpio_mask = gpio;

		/* Read various values from CONFIG1 */
		data->config1 = adm1026_read_value(client,
			ADM1026_REG_CONFIG1);
		if (data->config1 & CFG1_PWM_AFC) {
			data->pwm1.enable = 2;
			data->pwm1.auto_pwm_min =
				PWM_MIN_FROM_REG(data->pwm1.pwm);
		}
		/* Read the GPIO config */
		data->config2 = adm1026_read_value(client,
			ADM1026_REG_CONFIG2);
		data->config3 = adm1026_read_value(client,
			ADM1026_REG_CONFIG3);
		data->gpio_config[16] = (data->config3 >> 6) & 0x03;

		value = 0;
		for (i = 0;i <= 15;++i) {
			if ((i & 0x03) == 0) {
				value = adm1026_read_value(client,
					    ADM1026_REG_GPIO_CFG_0_3 + i/4);
			}
			data->gpio_config[i] = value & 0x03;
			value >>= 2;
		}

		data->last_config = jiffies;
	};	/* last_config */

	data->valid = 1;
	mutex_unlock(&data->update_lock);
	return data;
}

/* sysfs: inN_input (millivolts, scaled by INS_FROM_REG) */
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct adm1026_data *data = adm1026_update_device(dev);
	return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in[nr]));
}

/* sysfs: inN_min read */
static ssize_t show_in_min(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct adm1026_data *data = adm1026_update_device(dev);
	return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_min[nr]));
}

/* sysfs: inN_min write — cache and push to the limit register */
static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct adm1026_data *data = i2c_get_clientdata(client);
	int val = simple_strtol(buf, NULL, 10);

	mutex_lock(&data->update_lock);
	data->in_min[nr] = INS_TO_REG(nr, val);
	adm1026_write_value(client, ADM1026_REG_IN_MIN[nr], data->in_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* sysfs: inN_max read */
static ssize_t show_in_max(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct adm1026_data *data = adm1026_update_device(dev);
	return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_max[nr]));
}

/* sysfs: inN_max write — cache and push to the limit register */
static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct adm1026_data *data = i2c_get_clientdata(client);
	int val = simple_strtol(buf, NULL, 10);

	mutex_lock(&data->update_lock);
	data->in_max[nr] = INS_TO_REG(nr, val);
	adm1026_write_value(client, ADM1026_REG_IN_MAX[nr], data->in_max[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Declare the input/min/max attribute trio for one voltage channel. */
#define in_reg(offset)						\
static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in,	\
	NULL, offset);						\
static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR,	\
	show_in_min, set_in_min, offset);			\
static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR,	\
	show_in_max, set_in_max, offset);

in_reg(0);
in_reg(1);
in_reg(2);
in_reg(3);
in_reg(4);
in_reg(5);
in_reg(6);
in_reg(7);
in_reg(8);
in_reg(9); in_reg(10); in_reg(11); in_reg(12); in_reg(13); in_reg(14); in_reg(15); static ssize_t show_in16(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in[16]) - NEG12_OFFSET); } static ssize_t show_in16_min(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in_min[16]) - NEG12_OFFSET); } static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->in_min[16] = INS_TO_REG(16, val + NEG12_OFFSET); adm1026_write_value(client, ADM1026_REG_IN_MIN[16], data->in_min[16]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_in16_max(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in_max[16]) - NEG12_OFFSET); } static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->in_max[16] = INS_TO_REG(16, val+NEG12_OFFSET); adm1026_write_value(client, ADM1026_REG_IN_MAX[16], data->in_max[16]); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_in16, NULL, 16); static SENSOR_DEVICE_ATTR(in16_min, S_IRUGO | S_IWUSR, show_in16_min, set_in16_min, 16); static SENSOR_DEVICE_ATTR(in16_max, S_IRUGO | S_IWUSR, show_in16_max, set_in16_max, 16); /* Now add fan read/write functions */ static ssize_t 
show_fan(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr], data->fan_div[nr])); } static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr], data->fan_div[nr])); } static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->fan_min[nr] = FAN_TO_REG(val, data->fan_div[nr]); adm1026_write_value(client, ADM1026_REG_FAN_MIN(nr), data->fan_min[nr]); mutex_unlock(&data->update_lock); return count; } #define fan_offset(offset) \ static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan, NULL, \ offset - 1); \ static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ show_fan_min, set_fan_min, offset - 1); fan_offset(1); fan_offset(2); fan_offset(3); fan_offset(4); fan_offset(5); fan_offset(6); fan_offset(7); fan_offset(8); /* Adjust fan_min to account for new fan divisor */ static void fixup_fan_min(struct device *dev, int fan, int old_div) { struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int new_min; int new_div = data->fan_div[fan]; /* 0 and 0xff are special. 
Don't adjust them */ if (data->fan_min[fan] == 0 || data->fan_min[fan] == 0xff) { return; } new_min = data->fan_min[fan] * old_div / new_div; new_min = SENSORS_LIMIT(new_min, 1, 254); data->fan_min[fan] = new_min; adm1026_write_value(client, ADM1026_REG_FAN_MIN(fan), new_min); } /* Now add fan_div read/write functions */ static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", data->fan_div[nr]); } static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val, orig_div, new_div, shift; val = simple_strtol(buf, NULL, 10); new_div = DIV_TO_REG(val); if (new_div == 0) { return -EINVAL; } mutex_lock(&data->update_lock); orig_div = data->fan_div[nr]; data->fan_div[nr] = DIV_FROM_REG(new_div); if (nr < 4) { /* 0 <= nr < 4 */ shift = 2 * nr; adm1026_write_value(client, ADM1026_REG_FAN_DIV_0_3, ((DIV_TO_REG(orig_div) & (~(0x03 << shift))) | (new_div << shift))); } else { /* 3 < nr < 8 */ shift = 2 * (nr - 4); adm1026_write_value(client, ADM1026_REG_FAN_DIV_4_7, ((DIV_TO_REG(orig_div) & (~(0x03 << (2 * shift)))) | (new_div << shift))); } if (data->fan_div[nr] != orig_div) { fixup_fan_min(dev, nr, orig_div); } mutex_unlock(&data->update_lock); return count; } #define fan_offset_div(offset) \ static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \ show_fan_div, set_fan_div, offset - 1); fan_offset_div(1); fan_offset_div(2); fan_offset_div(3); fan_offset_div(4); fan_offset_div(5); fan_offset_div(6); fan_offset_div(7); fan_offset_div(8); /* Temps */ static ssize_t show_temp(struct device *dev, 
struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr])); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr])); } static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); adm1026_write_value(client, ADM1026_REG_TEMP_MIN[nr], data->temp_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr])); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); adm1026_write_value(client, ADM1026_REG_TEMP_MAX[nr], data->temp_max[nr]); mutex_unlock(&data->update_lock); return count; 
} #define temp_reg(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \ NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \ show_temp_min, set_temp_min, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ show_temp_max, set_temp_max, offset - 1); temp_reg(1); temp_reg(2); temp_reg(3); static ssize_t show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr])); } static ssize_t set_temp_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_offset[nr] = TEMP_TO_REG(val); adm1026_write_value(client, ADM1026_REG_TEMP_OFFSET[nr], data->temp_offset[nr]); mutex_unlock(&data->update_lock); return count; } #define temp_offset_reg(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_offset, S_IRUGO | S_IWUSR, \ show_temp_offset, set_temp_offset, offset - 1); temp_offset_reg(1); temp_offset_reg(2); temp_offset_reg(3); static ssize_t show_temp_auto_point1_temp_hyst(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG( ADM1026_FAN_ACTIVATION_TEMP_HYST + data->temp_tmin[nr])); } static ssize_t show_temp_auto_point2_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr 
= to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr] + ADM1026_FAN_CONTROL_TEMP_RANGE)); } static ssize_t show_temp_auto_point1_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr])); } static ssize_t set_temp_auto_point1_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_tmin[nr] = TEMP_TO_REG(val); adm1026_write_value(client, ADM1026_REG_TEMP_TMIN[nr], data->temp_tmin[nr]); mutex_unlock(&data->update_lock); return count; } #define temp_auto_point(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_auto_point1_temp, \ S_IRUGO | S_IWUSR, show_temp_auto_point1_temp, \ set_temp_auto_point1_temp, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_auto_point1_temp_hyst, S_IRUGO,\ show_temp_auto_point1_temp_hyst, NULL, offset - 1); \ static SENSOR_DEVICE_ATTR(temp##offset##_auto_point2_temp, S_IRUGO, \ show_temp_auto_point2_temp, NULL, offset - 1); temp_auto_point(1); temp_auto_point(2); temp_auto_point(3); static ssize_t show_temp_crit_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", (data->config1 & CFG1_THERM_HOT) >> 4); } static ssize_t set_temp_crit_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); 
struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); if ((val == 1) || (val==0)) { mutex_lock(&data->update_lock); data->config1 = (data->config1 & ~CFG1_THERM_HOT) | (val << 4); adm1026_write_value(client, ADM1026_REG_CONFIG1, data->config1); mutex_unlock(&data->update_lock); } return count; } #define temp_crit_enable(offset) \ static DEVICE_ATTR(temp##offset##_crit_enable, S_IRUGO | S_IWUSR, \ show_temp_crit_enable, set_temp_crit_enable); temp_crit_enable(1); temp_crit_enable(2); temp_crit_enable(3); static ssize_t show_temp_crit(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr])); } static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->temp_crit[nr] = TEMP_TO_REG(val); adm1026_write_value(client, ADM1026_REG_TEMP_THERM[nr], data->temp_crit[nr]); mutex_unlock(&data->update_lock); return count; } #define temp_crit_reg(offset) \ static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO | S_IWUSR, \ show_temp_crit, set_temp_crit, offset - 1); temp_crit_reg(1); temp_crit_reg(2); temp_crit_reg(3); static ssize_t show_analog_out_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", DAC_FROM_REG(data->analog_out)); } static ssize_t set_analog_out_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = 
to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->analog_out = DAC_TO_REG(val); adm1026_write_value(client, ADM1026_REG_DAC, data->analog_out); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(analog_out, S_IRUGO | S_IWUSR, show_analog_out_reg, set_analog_out_reg); static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); int vid = (data->gpio >> 11) & 0x1f; dev_dbg(dev, "Setting VID from GPIO11-15.\n"); return sprintf(buf, "%d\n", vid_from_reg(vid, data->vrm)); } static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL); static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", data->vrm); } static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adm1026_data *data = dev_get_drvdata(dev); data->vrm = simple_strtol(buf, NULL, 10); return count; } static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg); static ssize_t show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%ld\n", data->alarms); } static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL); static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); int bitnr = to_sensor_dev_attr(attr)->index; return sprintf(buf, "%ld\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(in11_alarm, S_IRUGO, show_alarm, 
NULL, 2); static SENSOR_DEVICE_ATTR(in12_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in13_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(in14_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(in15_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(in16_alarm, S_IRUGO, show_alarm, NULL, 7); static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 9); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 10); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 12); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 13); static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 14); static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 15); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 16); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 17); static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 18); static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 19); static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 20); static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 21); static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_alarm, NULL, 22); static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_alarm, NULL, 23); static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 24); static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 25); static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 26); static ssize_t show_alarm_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%ld\n", data->alarm_mask); } static ssize_t set_alarm_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct 
i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); unsigned long mask; mutex_lock(&data->update_lock); data->alarm_mask = val & 0x7fffffff; mask = data->alarm_mask | (data->gpio_mask & 0x10000 ? 0x80000000 : 0); adm1026_write_value(client, ADM1026_REG_MASK1, mask & 0xff); mask >>= 8; adm1026_write_value(client, ADM1026_REG_MASK2, mask & 0xff); mask >>= 8; adm1026_write_value(client, ADM1026_REG_MASK3, mask & 0xff); mask >>= 8; adm1026_write_value(client, ADM1026_REG_MASK4, mask & 0xff); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(alarm_mask, S_IRUGO | S_IWUSR, show_alarm_mask, set_alarm_mask); static ssize_t show_gpio(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%ld\n", data->gpio); } static ssize_t set_gpio(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); long gpio; mutex_lock(&data->update_lock); data->gpio = val & 0x1ffff; gpio = data->gpio; adm1026_write_value(client, ADM1026_REG_GPIO_STATUS_0_7, gpio & 0xff); gpio >>= 8; adm1026_write_value(client, ADM1026_REG_GPIO_STATUS_8_15, gpio & 0xff); gpio = ((gpio >> 1) & 0x80) | (data->alarms >> 24 & 0x7f); adm1026_write_value(client, ADM1026_REG_STATUS4, gpio & 0xff); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(gpio, S_IRUGO | S_IWUSR, show_gpio, set_gpio); static ssize_t show_gpio_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%ld\n", data->gpio_mask); } static ssize_t set_gpio_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); 
struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); long mask; mutex_lock(&data->update_lock); data->gpio_mask = val & 0x1ffff; mask = data->gpio_mask; adm1026_write_value(client, ADM1026_REG_GPIO_MASK_0_7, mask & 0xff); mask >>= 8; adm1026_write_value(client, ADM1026_REG_GPIO_MASK_8_15, mask & 0xff); mask = ((mask >> 1) & 0x80) | (data->alarm_mask >> 24 & 0x7f); adm1026_write_value(client, ADM1026_REG_MASK1, mask & 0xff); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(gpio_mask, S_IRUGO | S_IWUSR, show_gpio_mask, set_gpio_mask); static ssize_t show_pwm_reg(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm1.pwm)); } static ssize_t set_pwm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); if (data->pwm1.enable == 1) { int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->pwm1.pwm = PWM_TO_REG(val); adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm); mutex_unlock(&data->update_lock); } return count; } static ssize_t show_auto_pwm_min(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", data->pwm1.auto_pwm_min); } static ssize_t set_auto_pwm_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); mutex_lock(&data->update_lock); data->pwm1.auto_pwm_min = SENSORS_LIMIT(val, 0, 255); if (data->pwm1.enable == 2) { /* apply immediately */ data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) | PWM_MIN_TO_REG(data->pwm1.auto_pwm_min)); 
adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm); } mutex_unlock(&data->update_lock); return count; } static ssize_t show_auto_pwm_max(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", ADM1026_PWM_MAX); } static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct adm1026_data *data = adm1026_update_device(dev); return sprintf(buf, "%d\n", data->pwm1.enable); } static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); int val = simple_strtol(buf, NULL, 10); int old_enable; if ((val >= 0) && (val < 3)) { mutex_lock(&data->update_lock); old_enable = data->pwm1.enable; data->pwm1.enable = val; data->config1 = (data->config1 & ~CFG1_PWM_AFC) | ((val == 2) ? CFG1_PWM_AFC : 0); adm1026_write_value(client, ADM1026_REG_CONFIG1, data->config1); if (val == 2) { /* apply pwm1_auto_pwm_min to pwm1 */ data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) | PWM_MIN_TO_REG(data->pwm1.auto_pwm_min)); adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm); } else if (!((old_enable == 1) && (val == 1))) { /* set pwm to safe value */ data->pwm1.pwm = 255; adm1026_write_value(client, ADM1026_REG_PWM, data->pwm1.pwm); } mutex_unlock(&data->update_lock); } return count; } /* enable PWM fan control */ static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm_reg, set_pwm_reg); static DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm_reg, set_pwm_reg); static DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm_reg, set_pwm_reg); static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable, set_pwm_enable); static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, show_pwm_enable, set_pwm_enable); static DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, show_pwm_enable, set_pwm_enable); static DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO | S_IWUSR, 
show_auto_pwm_min, set_auto_pwm_min); static DEVICE_ATTR(temp2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_auto_pwm_min, set_auto_pwm_min); static DEVICE_ATTR(temp3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_auto_pwm_min, set_auto_pwm_min); static DEVICE_ATTR(temp1_auto_point2_pwm, S_IRUGO, show_auto_pwm_max, NULL); static DEVICE_ATTR(temp2_auto_point2_pwm, S_IRUGO, show_auto_pwm_max, NULL); static DEVICE_ATTR(temp3_auto_point2_pwm, S_IRUGO, show_auto_pwm_max, NULL); static struct attribute *adm1026_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in6_alarm.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in7_max.dev_attr.attr, &sensor_dev_attr_in7_min.dev_attr.attr, &sensor_dev_attr_in7_alarm.dev_attr.attr, &sensor_dev_attr_in10_input.dev_attr.attr, &sensor_dev_attr_in10_max.dev_attr.attr, &sensor_dev_attr_in10_min.dev_attr.attr, &sensor_dev_attr_in10_alarm.dev_attr.attr, 
&sensor_dev_attr_in11_input.dev_attr.attr, &sensor_dev_attr_in11_max.dev_attr.attr, &sensor_dev_attr_in11_min.dev_attr.attr, &sensor_dev_attr_in11_alarm.dev_attr.attr, &sensor_dev_attr_in12_input.dev_attr.attr, &sensor_dev_attr_in12_max.dev_attr.attr, &sensor_dev_attr_in12_min.dev_attr.attr, &sensor_dev_attr_in12_alarm.dev_attr.attr, &sensor_dev_attr_in13_input.dev_attr.attr, &sensor_dev_attr_in13_max.dev_attr.attr, &sensor_dev_attr_in13_min.dev_attr.attr, &sensor_dev_attr_in13_alarm.dev_attr.attr, &sensor_dev_attr_in14_input.dev_attr.attr, &sensor_dev_attr_in14_max.dev_attr.attr, &sensor_dev_attr_in14_min.dev_attr.attr, &sensor_dev_attr_in14_alarm.dev_attr.attr, &sensor_dev_attr_in15_input.dev_attr.attr, &sensor_dev_attr_in15_max.dev_attr.attr, &sensor_dev_attr_in15_min.dev_attr.attr, &sensor_dev_attr_in15_alarm.dev_attr.attr, &sensor_dev_attr_in16_input.dev_attr.attr, &sensor_dev_attr_in16_max.dev_attr.attr, &sensor_dev_attr_in16_min.dev_attr.attr, &sensor_dev_attr_in16_alarm.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan2_div.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, &sensor_dev_attr_fan3_div.dev_attr.attr, &sensor_dev_attr_fan3_min.dev_attr.attr, &sensor_dev_attr_fan3_alarm.dev_attr.attr, &sensor_dev_attr_fan4_input.dev_attr.attr, &sensor_dev_attr_fan4_div.dev_attr.attr, &sensor_dev_attr_fan4_min.dev_attr.attr, &sensor_dev_attr_fan4_alarm.dev_attr.attr, &sensor_dev_attr_fan5_input.dev_attr.attr, &sensor_dev_attr_fan5_div.dev_attr.attr, &sensor_dev_attr_fan5_min.dev_attr.attr, &sensor_dev_attr_fan5_alarm.dev_attr.attr, &sensor_dev_attr_fan6_input.dev_attr.attr, &sensor_dev_attr_fan6_div.dev_attr.attr, &sensor_dev_attr_fan6_min.dev_attr.attr, 
&sensor_dev_attr_fan6_alarm.dev_attr.attr, &sensor_dev_attr_fan7_input.dev_attr.attr, &sensor_dev_attr_fan7_div.dev_attr.attr, &sensor_dev_attr_fan7_min.dev_attr.attr, &sensor_dev_attr_fan7_alarm.dev_attr.attr, &sensor_dev_attr_fan8_input.dev_attr.attr, &sensor_dev_attr_fan8_div.dev_attr.attr, &sensor_dev_attr_fan8_min.dev_attr.attr, &sensor_dev_attr_fan8_alarm.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp1_offset.dev_attr.attr, &sensor_dev_attr_temp2_offset.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr, &sensor_dev_attr_temp2_auto_point1_temp_hyst.dev_attr.attr, &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp2_crit.dev_attr.attr, &dev_attr_temp1_crit_enable.attr, &dev_attr_temp2_crit_enable.attr, &dev_attr_cpu0_vid.attr, &dev_attr_vrm.attr, &dev_attr_alarms.attr, &dev_attr_alarm_mask.attr, &dev_attr_gpio.attr, &dev_attr_gpio_mask.attr, &dev_attr_pwm1.attr, &dev_attr_pwm2.attr, &dev_attr_pwm3.attr, &dev_attr_pwm1_enable.attr, &dev_attr_pwm2_enable.attr, &dev_attr_pwm3_enable.attr, &dev_attr_temp1_auto_point1_pwm.attr, &dev_attr_temp2_auto_point1_pwm.attr, &dev_attr_temp1_auto_point2_pwm.attr, &dev_attr_temp2_auto_point2_pwm.attr, &dev_attr_analog_out.attr, NULL }; static const struct attribute_group adm1026_group = { .attrs = adm1026_attributes, }; static struct attribute *adm1026_attributes_temp3[] = { &sensor_dev_attr_temp3_input.dev_attr.attr, 
&sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, &sensor_dev_attr_temp3_offset.dev_attr.attr, &sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_temp3_auto_point1_temp_hyst.dev_attr.attr, &sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_temp3_crit.dev_attr.attr, &dev_attr_temp3_crit_enable.attr, &dev_attr_temp3_auto_point1_pwm.attr, &dev_attr_temp3_auto_point2_pwm.attr, NULL }; static const struct attribute_group adm1026_group_temp3 = { .attrs = adm1026_attributes_temp3, }; static struct attribute *adm1026_attributes_in8_9[] = { &sensor_dev_attr_in8_input.dev_attr.attr, &sensor_dev_attr_in8_max.dev_attr.attr, &sensor_dev_attr_in8_min.dev_attr.attr, &sensor_dev_attr_in8_alarm.dev_attr.attr, &sensor_dev_attr_in9_input.dev_attr.attr, &sensor_dev_attr_in9_max.dev_attr.attr, &sensor_dev_attr_in9_min.dev_attr.attr, &sensor_dev_attr_in9_alarm.dev_attr.attr, NULL }; static const struct attribute_group adm1026_group_in8_9 = { .attrs = adm1026_attributes_in8_9, }; /* Return 0 if detection is successful, -ENODEV otherwise */ static int adm1026_detect(struct i2c_client *client, int kind, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int address = client->addr; int company, verstep; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { /* We need to be able to do byte I/O */ return -ENODEV; }; /* Now, we do the remaining detection. */ company = adm1026_read_value(client, ADM1026_REG_COMPANY); verstep = adm1026_read_value(client, ADM1026_REG_VERSTEP); dev_dbg(&adapter->dev, "Detecting device at %d,0x%02x with" " COMPANY: 0x%02x and VERSTEP: 0x%02x\n", i2c_adapter_id(client->adapter), client->addr, company, verstep); /* If auto-detecting, Determine the chip type. 
*/ if (kind <= 0) { dev_dbg(&adapter->dev, "Autodetecting device at %d,0x%02x " "...\n", i2c_adapter_id(adapter), address); if (company == ADM1026_COMPANY_ANALOG_DEV && verstep == ADM1026_VERSTEP_ADM1026) { kind = adm1026; } else if (company == ADM1026_COMPANY_ANALOG_DEV && (verstep & 0xf0) == ADM1026_VERSTEP_GENERIC) { dev_err(&adapter->dev, "Unrecognized stepping " "0x%02x. Defaulting to ADM1026.\n", verstep); kind = adm1026; } else if ((verstep & 0xf0) == ADM1026_VERSTEP_GENERIC) { dev_err(&adapter->dev, "Found version/stepping " "0x%02x. Assuming generic ADM1026.\n", verstep); kind = any_chip; } else { dev_dbg(&adapter->dev, "Autodetection failed\n"); /* Not an ADM1026 ... */ if (kind == 0) { /* User used force=x,y */ dev_err(&adapter->dev, "Generic ADM1026 not " "found at %d,0x%02x. Try " "force_adm1026.\n", i2c_adapter_id(adapter), address); } return -ENODEV; } } strlcpy(info->type, "adm1026", I2C_NAME_SIZE); return 0; } static int adm1026_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adm1026_data *data; int err; data = kzalloc(sizeof(struct adm1026_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Set the VRM version */ data->vrm = vid_which_vrm(); /* Initialize the ADM1026 chip */ adm1026_init_client(client); /* Register sysfs hooks */ if ((err = sysfs_create_group(&client->dev.kobj, &adm1026_group))) goto exitfree; if (data->config1 & CFG1_AIN8_9) err = sysfs_create_group(&client->dev.kobj, &adm1026_group_in8_9); else err = sysfs_create_group(&client->dev.kobj, &adm1026_group_temp3); if (err) goto exitremove; data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exitremove; } return 0; /* Error out and cleanup code */ exitremove: sysfs_remove_group(&client->dev.kobj, &adm1026_group); if (data->config1 & CFG1_AIN8_9) sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9); else 
sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3); exitfree: kfree(data); exit: return err; } static int adm1026_remove(struct i2c_client *client) { struct adm1026_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &adm1026_group); if (data->config1 & CFG1_AIN8_9) sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9); else sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3); kfree(data); return 0; } static int __init sm_adm1026_init(void) { return i2c_add_driver(&adm1026_driver); } static void __exit sm_adm1026_exit(void) { i2c_del_driver(&adm1026_driver); } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Philip Pokorny <ppokorny@penguincomputing.com>, " "Justin Thiessen <jthiessen@penguincomputing.com>"); MODULE_DESCRIPTION("ADM1026 driver"); module_init(sm_adm1026_init); module_exit(sm_adm1026_exit);
gpl-2.0
Radium-Devices/Radium_yu
drivers/acpi/acpica/utxferror.c
2137
15333
/*******************************************************************************
 *
 * Module Name: utxferror - Various error/warning output functions
 *
 ******************************************************************************/

/*
 * Copyright (C) 2000 - 2013, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_UTILITIES
ACPI_MODULE_NAME("utxferror")

/*
 * This module is used for the in-kernel ACPICA as well as the ACPICA
 * tools/applications.
 *
 * For the iASL compiler case, the output is redirected to stderr so that
 * any of the various ACPI errors and warnings do not appear in the output
 * files, for either the compiler or disassembler portions of the tool.
 */
#ifdef ACPI_ASL_COMPILER
#include <stdio.h>
extern FILE *acpi_gbl_output_file;

#define ACPI_MSG_REDIRECT_BEGIN \
	FILE *output_file = acpi_gbl_output_file; \
	acpi_os_redirect_output (stderr);

#define ACPI_MSG_REDIRECT_END \
	acpi_os_redirect_output (output_file);

#else
/*
 * non-iASL case - no redirection, nothing to do
 */
#define ACPI_MSG_REDIRECT_BEGIN
#define ACPI_MSG_REDIRECT_END
#endif

/*
 * Common message prefixes
 */
#define ACPI_MSG_ERROR          "ACPI Error: "
#define ACPI_MSG_EXCEPTION      "ACPI Exception: "
#define ACPI_MSG_WARNING        "ACPI Warning: "
#define ACPI_MSG_INFO           "ACPI: "
#define ACPI_MSG_BIOS_ERROR     "ACPI BIOS Bug: Error: "
#define ACPI_MSG_BIOS_WARNING   "ACPI BIOS Bug: Warning: "

/*
 * Common message suffix: appends "(version/module-line)" and a newline.
 * Relies on module_name and line_number being in scope at the call site.
 */
#define ACPI_MSG_SUFFIX \
	acpi_os_printf (" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)

/*******************************************************************************
 *
 * FUNCTION:    acpi_error
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              format              - Printf format string + additional args
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print "ACPI Error" message with module/line/version info
 *
 ******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
acpi_error(const char *module_name, u32 line_number, const char *format, ...)
{
	va_list arg_list;

	ACPI_MSG_REDIRECT_BEGIN;
	acpi_os_printf(ACPI_MSG_ERROR);

	va_start(arg_list, format);
	acpi_os_vprintf(format, arg_list);
	ACPI_MSG_SUFFIX;
	va_end(arg_list);

	ACPI_MSG_REDIRECT_END;
}

ACPI_EXPORT_SYMBOL(acpi_error)

/*******************************************************************************
 *
 * FUNCTION:    acpi_exception
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              status              - Status to be formatted
 *              format              - Printf format string + additional args
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
 *              and decoded acpi_status.
 *
 ******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
acpi_exception(const char *module_name,
	       u32 line_number, acpi_status status, const char *format, ...)
{
	va_list arg_list;

	ACPI_MSG_REDIRECT_BEGIN;
	acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
		       acpi_format_exception(status));

	va_start(arg_list, format);
	acpi_os_vprintf(format, arg_list);
	ACPI_MSG_SUFFIX;
	va_end(arg_list);

	ACPI_MSG_REDIRECT_END;
}

ACPI_EXPORT_SYMBOL(acpi_exception)

/*******************************************************************************
 *
 * FUNCTION:    acpi_warning
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              format              - Printf format string + additional args
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print "ACPI Warning" message with module/line/version info
 *
 ******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
{
	va_list arg_list;

	ACPI_MSG_REDIRECT_BEGIN;
	acpi_os_printf(ACPI_MSG_WARNING);

	va_start(arg_list, format);
	acpi_os_vprintf(format, arg_list);
	ACPI_MSG_SUFFIX;
	va_end(arg_list);

	ACPI_MSG_REDIRECT_END;
}

ACPI_EXPORT_SYMBOL(acpi_warning)

/*******************************************************************************
 *
 * FUNCTION:    acpi_info
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              format              - Printf format string + additional args
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print generic "ACPI:" information message. There is no
 *              module/line/version info in order to keep the message simple.
 *
 * TBD: module_name and line_number args are not needed, should be removed.
 *
 ******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
acpi_info(const char *module_name, u32 line_number, const char *format, ...)
{
	va_list arg_list;

	ACPI_MSG_REDIRECT_BEGIN;
	acpi_os_printf(ACPI_MSG_INFO);

	va_start(arg_list, format);
	acpi_os_vprintf(format, arg_list);
	acpi_os_printf("\n");	/* no ACPI_MSG_SUFFIX - keep it simple */
	va_end(arg_list);

	ACPI_MSG_REDIRECT_END;
}

ACPI_EXPORT_SYMBOL(acpi_info)

/*******************************************************************************
 *
 * FUNCTION:    acpi_bios_error
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              format              - Printf format string + additional args
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print "ACPI Firmware Error" message with module/line/version
 *              info
 *
 ******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
acpi_bios_error(const char *module_name,
		u32 line_number, const char *format, ...)
{
	va_list arg_list;

	ACPI_MSG_REDIRECT_BEGIN;
	acpi_os_printf(ACPI_MSG_BIOS_ERROR);

	va_start(arg_list, format);
	acpi_os_vprintf(format, arg_list);
	ACPI_MSG_SUFFIX;
	va_end(arg_list);

	ACPI_MSG_REDIRECT_END;
}

ACPI_EXPORT_SYMBOL(acpi_bios_error)

/*******************************************************************************
 *
 * FUNCTION:    acpi_bios_warning
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              format              - Printf format string + additional args
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print "ACPI Firmware Warning" message with module/line/version
 *              info
 *
 ******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
acpi_bios_warning(const char *module_name,
		  u32 line_number, const char *format, ...)
{
	va_list arg_list;

	ACPI_MSG_REDIRECT_BEGIN;
	acpi_os_printf(ACPI_MSG_BIOS_WARNING);

	va_start(arg_list, format);
	acpi_os_vprintf(format, arg_list);
	ACPI_MSG_SUFFIX;
	va_end(arg_list);

	ACPI_MSG_REDIRECT_END;
}

ACPI_EXPORT_SYMBOL(acpi_bios_warning)

/*
 * The remainder of this module contains internal error functions that may
 * be configured out.
 */
#if !defined (ACPI_NO_ERROR_MESSAGES) && !defined (ACPI_BIN_APP)

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_predefined_warning
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              pathname            - Full pathname to the node
 *              node_flags          - From Namespace node for the method/object
 *              format              - Printf format string + additional args
 *
 * RETURN:      None
 *
 * DESCRIPTION: Warnings for the predefined validation module. Messages are
 *              only emitted the first time a problem with a particular
 *              method/object is detected. This prevents a flood of error
 *              messages for methods that are repeatedly evaluated.
 *
 ******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
acpi_ut_predefined_warning(const char *module_name,
			   u32 line_number,
			   char *pathname,
			   u8 node_flags, const char *format, ...)
{
	va_list arg_list;

	/*
	 * Warning messages for this method/object will be disabled after the
	 * first time a validation fails or an object is successfully repaired.
	 */
	if (node_flags & ANOBJ_EVALUATED) {
		return;
	}

	acpi_os_printf(ACPI_MSG_WARNING "For %s: ", pathname);

	va_start(arg_list, format);
	acpi_os_vprintf(format, arg_list);
	ACPI_MSG_SUFFIX;
	va_end(arg_list);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_predefined_info
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              pathname            - Full pathname to the node
 *              node_flags          - From Namespace node for the method/object
 *              format              - Printf format string + additional args
 *
 * RETURN:      None
 *
 * DESCRIPTION: Info messages for the predefined validation module. Messages
 *              are only emitted the first time a problem with a particular
 *              method/object is detected. This prevents a flood of
 *              messages for methods that are repeatedly evaluated.
 *
 ******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
acpi_ut_predefined_info(const char *module_name,
			u32 line_number,
			char *pathname, u8 node_flags, const char *format, ...)
{
	va_list arg_list;

	/*
	 * Warning messages for this method/object will be disabled after the
	 * first time a validation fails or an object is successfully repaired.
	 */
	if (node_flags & ANOBJ_EVALUATED) {
		return;
	}

	acpi_os_printf(ACPI_MSG_INFO "For %s: ", pathname);

	va_start(arg_list, format);
	acpi_os_vprintf(format, arg_list);
	ACPI_MSG_SUFFIX;
	va_end(arg_list);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_namespace_error
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              internal_name       - Name or path of the namespace node
 *              lookup_status       - Exception code from NS lookup
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print error message with the full pathname for the NS node.
 *
 ******************************************************************************/
void
acpi_ut_namespace_error(const char *module_name,
			u32 line_number,
			const char *internal_name, acpi_status lookup_status)
{
	acpi_status status;
	u32 bad_name;
	char *name = NULL;

	ACPI_MSG_REDIRECT_BEGIN;
	acpi_os_printf(ACPI_MSG_ERROR);

	if (lookup_status == AE_BAD_CHARACTER) {

		/* There is a non-ascii character in the name */

		ACPI_MOVE_32_TO_32(&bad_name,
				   ACPI_CAST_PTR(u32, internal_name));
		acpi_os_printf("[0x%.8X] (NON-ASCII)", bad_name);
	} else {
		/* Convert path to external format */

		status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
						  internal_name, NULL, &name);

		/* Print target name */

		if (ACPI_SUCCESS(status)) {
			acpi_os_printf("[%s]", name);
		} else {
			acpi_os_printf("[COULD NOT EXTERNALIZE NAME]");
		}

		if (name) {
			ACPI_FREE(name);
		}
	}

	acpi_os_printf(" Namespace lookup failure, %s",
		       acpi_format_exception(lookup_status));

	ACPI_MSG_SUFFIX;
	ACPI_MSG_REDIRECT_END;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_method_error
 *
 * PARAMETERS:  module_name         - Caller's module name (for error output)
 *              line_number         - Caller's line number (for error output)
 *              message             - Error message to use on failure
 *              prefix_node         - Prefix relative to the path
 *              path                - Path to the node (optional)
 *              method_status       - Execution status
 *
 * RETURN:      None
 *
 * DESCRIPTION: Print error message with the full pathname for the method.
 *
 ******************************************************************************/
void
acpi_ut_method_error(const char *module_name,
		     u32 line_number,
		     const char *message,
		     struct acpi_namespace_node *prefix_node,
		     const char *path, acpi_status method_status)
{
	acpi_status status;
	struct acpi_namespace_node *node = prefix_node;

	ACPI_MSG_REDIRECT_BEGIN;
	acpi_os_printf(ACPI_MSG_ERROR);

	if (path) {
		status = acpi_ns_get_node(prefix_node, path,
					  ACPI_NS_NO_UPSEARCH, &node);
		if (ACPI_FAILURE(status)) {
			acpi_os_printf("[Could not get node by pathname]");
		}
	}

	acpi_ns_print_node_pathname(node, message);
	acpi_os_printf(", %s", acpi_format_exception(method_status));

	ACPI_MSG_SUFFIX;
	ACPI_MSG_REDIRECT_END;
}

#endif				/* ACPI_NO_ERROR_MESSAGES */
gpl-2.0
codekidX/android_kernel_frost_i9082
tools/perf/util/python.c
2393
26050
#include <Python.h> #include <structmember.h> #include <inttypes.h> #include <poll.h> #include "evlist.h" #include "evsel.h" #include "event.h" #include "cpumap.h" #include "thread_map.h" /* Define PyVarObject_HEAD_INIT for python 2.5 */ #ifndef PyVarObject_HEAD_INIT # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, #endif struct throttle_event { struct perf_event_header header; u64 time; u64 id; u64 stream_id; }; PyMODINIT_FUNC initperf(void); #define member_def(type, member, ptype, help) \ { #member, ptype, \ offsetof(struct pyrf_event, event) + offsetof(struct type, member), \ 0, help } #define sample_member_def(name, member, ptype, help) \ { #name, ptype, \ offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \ 0, help } struct pyrf_event { PyObject_HEAD struct perf_sample sample; union perf_event event; }; #define sample_members \ sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \ sample_member_def(sample_pid, pid, T_INT, "event pid"), \ sample_member_def(sample_tid, tid, T_INT, "event tid"), \ sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \ sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \ sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \ sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \ sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \ sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"), static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object."); static PyMemberDef pyrf_mmap_event__members[] = { sample_members member_def(perf_event_header, type, T_UINT, "event type"), member_def(mmap_event, pid, T_UINT, "event pid"), member_def(mmap_event, tid, T_UINT, "event tid"), member_def(mmap_event, start, T_ULONGLONG, "start of the map"), member_def(mmap_event, len, T_ULONGLONG, "map length"), member_def(mmap_event, pgoff, T_ULONGLONG, "page offset"), member_def(mmap_event, filename, 
T_STRING_INPLACE, "backing store"), { .name = NULL, }, }; static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent) { PyObject *ret; char *s; if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRIx64 ", " "length: %#" PRIx64 ", offset: %#" PRIx64 ", " "filename: %s }", pevent->event.mmap.pid, pevent->event.mmap.tid, pevent->event.mmap.start, pevent->event.mmap.len, pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) { ret = PyErr_NoMemory(); } else { ret = PyString_FromString(s); free(s); } return ret; } static PyTypeObject pyrf_mmap_event__type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "perf.mmap_event", .tp_basicsize = sizeof(struct pyrf_event), .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = pyrf_mmap_event__doc, .tp_members = pyrf_mmap_event__members, .tp_repr = (reprfunc)pyrf_mmap_event__repr, }; static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object."); static PyMemberDef pyrf_task_event__members[] = { sample_members member_def(perf_event_header, type, T_UINT, "event type"), member_def(fork_event, pid, T_UINT, "event pid"), member_def(fork_event, ppid, T_UINT, "event ppid"), member_def(fork_event, tid, T_UINT, "event tid"), member_def(fork_event, ptid, T_UINT, "event ptid"), member_def(fork_event, time, T_ULONGLONG, "timestamp"), { .name = NULL, }, }; static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent) { return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, " "ptid: %u, time: %" PRIu64 "}", pevent->event.header.type == PERF_RECORD_FORK ? 
"fork" : "exit", pevent->event.fork.pid, pevent->event.fork.ppid, pevent->event.fork.tid, pevent->event.fork.ptid, pevent->event.fork.time); } static PyTypeObject pyrf_task_event__type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "perf.task_event", .tp_basicsize = sizeof(struct pyrf_event), .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = pyrf_task_event__doc, .tp_members = pyrf_task_event__members, .tp_repr = (reprfunc)pyrf_task_event__repr, }; static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object."); static PyMemberDef pyrf_comm_event__members[] = { sample_members member_def(perf_event_header, type, T_UINT, "event type"), member_def(comm_event, pid, T_UINT, "event pid"), member_def(comm_event, tid, T_UINT, "event tid"), member_def(comm_event, comm, T_STRING_INPLACE, "process name"), { .name = NULL, }, }; static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent) { return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }", pevent->event.comm.pid, pevent->event.comm.tid, pevent->event.comm.comm); } static PyTypeObject pyrf_comm_event__type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "perf.comm_event", .tp_basicsize = sizeof(struct pyrf_event), .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = pyrf_comm_event__doc, .tp_members = pyrf_comm_event__members, .tp_repr = (reprfunc)pyrf_comm_event__repr, }; static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object."); static PyMemberDef pyrf_throttle_event__members[] = { sample_members member_def(perf_event_header, type, T_UINT, "event type"), member_def(throttle_event, time, T_ULONGLONG, "timestamp"), member_def(throttle_event, id, T_ULONGLONG, "event id"), member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"), { .name = NULL, }, }; static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent) { struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1); return PyString_FromFormat("{ type: 
%sthrottle, time: %" PRIu64 ", id: %" PRIu64 ", stream_id: %" PRIu64 " }", pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un", te->time, te->id, te->stream_id); } static PyTypeObject pyrf_throttle_event__type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "perf.throttle_event", .tp_basicsize = sizeof(struct pyrf_event), .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = pyrf_throttle_event__doc, .tp_members = pyrf_throttle_event__members, .tp_repr = (reprfunc)pyrf_throttle_event__repr, }; static int pyrf_event__setup_types(void) { int err; pyrf_mmap_event__type.tp_new = pyrf_task_event__type.tp_new = pyrf_comm_event__type.tp_new = pyrf_throttle_event__type.tp_new = PyType_GenericNew; err = PyType_Ready(&pyrf_mmap_event__type); if (err < 0) goto out; err = PyType_Ready(&pyrf_task_event__type); if (err < 0) goto out; err = PyType_Ready(&pyrf_comm_event__type); if (err < 0) goto out; err = PyType_Ready(&pyrf_throttle_event__type); if (err < 0) goto out; out: return err; } static PyTypeObject *pyrf_event__type[] = { [PERF_RECORD_MMAP] = &pyrf_mmap_event__type, [PERF_RECORD_LOST] = &pyrf_mmap_event__type, [PERF_RECORD_COMM] = &pyrf_comm_event__type, [PERF_RECORD_EXIT] = &pyrf_task_event__type, [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type, [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type, [PERF_RECORD_FORK] = &pyrf_task_event__type, [PERF_RECORD_READ] = &pyrf_mmap_event__type, [PERF_RECORD_SAMPLE] = &pyrf_mmap_event__type, }; static PyObject *pyrf_event__new(union perf_event *event) { struct pyrf_event *pevent; PyTypeObject *ptype; if (event->header.type < PERF_RECORD_MMAP || event->header.type > PERF_RECORD_SAMPLE) return NULL; ptype = pyrf_event__type[event->header.type]; pevent = PyObject_New(struct pyrf_event, ptype); if (pevent != NULL) memcpy(&pevent->event, event, event->header.size); return (PyObject *)pevent; } struct pyrf_cpu_map { PyObject_HEAD struct cpu_map *cpus; }; static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, 
PyObject *args, PyObject *kwargs) { static char *kwlist[] = { "cpustr", NULL, NULL, }; char *cpustr = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s", kwlist, &cpustr)) return -1; pcpus->cpus = cpu_map__new(cpustr); if (pcpus->cpus == NULL) return -1; return 0; } static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus) { cpu_map__delete(pcpus->cpus); pcpus->ob_type->tp_free((PyObject*)pcpus); } static Py_ssize_t pyrf_cpu_map__length(PyObject *obj) { struct pyrf_cpu_map *pcpus = (void *)obj; return pcpus->cpus->nr; } static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i) { struct pyrf_cpu_map *pcpus = (void *)obj; if (i >= pcpus->cpus->nr) return NULL; return Py_BuildValue("i", pcpus->cpus->map[i]); } static PySequenceMethods pyrf_cpu_map__sequence_methods = { .sq_length = pyrf_cpu_map__length, .sq_item = pyrf_cpu_map__item, }; static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object."); static PyTypeObject pyrf_cpu_map__type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "perf.cpu_map", .tp_basicsize = sizeof(struct pyrf_cpu_map), .tp_dealloc = (destructor)pyrf_cpu_map__delete, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = pyrf_cpu_map__doc, .tp_as_sequence = &pyrf_cpu_map__sequence_methods, .tp_init = (initproc)pyrf_cpu_map__init, }; static int pyrf_cpu_map__setup_types(void) { pyrf_cpu_map__type.tp_new = PyType_GenericNew; return PyType_Ready(&pyrf_cpu_map__type); } struct pyrf_thread_map { PyObject_HEAD struct thread_map *threads; }; static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, PyObject *args, PyObject *kwargs) { static char *kwlist[] = { "pid", "tid", NULL, NULL, }; int pid = -1, tid = -1; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist, &pid, &tid)) return -1; pthreads->threads = thread_map__new(pid, tid); if (pthreads->threads == NULL) return -1; return 0; } static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads) { thread_map__delete(pthreads->threads); 
pthreads->ob_type->tp_free((PyObject*)pthreads); } static Py_ssize_t pyrf_thread_map__length(PyObject *obj) { struct pyrf_thread_map *pthreads = (void *)obj; return pthreads->threads->nr; } static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i) { struct pyrf_thread_map *pthreads = (void *)obj; if (i >= pthreads->threads->nr) return NULL; return Py_BuildValue("i", pthreads->threads->map[i]); } static PySequenceMethods pyrf_thread_map__sequence_methods = { .sq_length = pyrf_thread_map__length, .sq_item = pyrf_thread_map__item, }; static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object."); static PyTypeObject pyrf_thread_map__type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "perf.thread_map", .tp_basicsize = sizeof(struct pyrf_thread_map), .tp_dealloc = (destructor)pyrf_thread_map__delete, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = pyrf_thread_map__doc, .tp_as_sequence = &pyrf_thread_map__sequence_methods, .tp_init = (initproc)pyrf_thread_map__init, }; static int pyrf_thread_map__setup_types(void) { pyrf_thread_map__type.tp_new = PyType_GenericNew; return PyType_Ready(&pyrf_thread_map__type); } struct pyrf_evsel { PyObject_HEAD struct perf_evsel evsel; }; static int pyrf_evsel__init(struct pyrf_evsel *pevsel, PyObject *args, PyObject *kwargs) { struct perf_event_attr attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, .sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID, }; static char *kwlist[] = { "type", "config", "sample_freq", "sample_period", "sample_type", "read_format", "disabled", "inherit", "pinned", "exclusive", "exclude_user", "exclude_kernel", "exclude_hv", "exclude_idle", "mmap", "comm", "freq", "inherit_stat", "enable_on_exec", "task", "watermark", "precise_ip", "mmap_data", "sample_id_all", "wakeup_events", "bp_type", "bp_addr", "bp_len", NULL, NULL, }; u64 sample_period = 0; u32 disabled = 0, inherit = 0, pinned = 0, exclusive = 0, exclude_user = 0, exclude_kernel = 0, exclude_hv = 0, 
exclude_idle = 0, mmap = 0, comm = 0, freq = 1, inherit_stat = 0, enable_on_exec = 0, task = 0, watermark = 0, precise_ip = 0, mmap_data = 0, sample_id_all = 1; int idx = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist, &attr.type, &attr.config, &attr.sample_freq, &sample_period, &attr.sample_type, &attr.read_format, &disabled, &inherit, &pinned, &exclusive, &exclude_user, &exclude_kernel, &exclude_hv, &exclude_idle, &mmap, &comm, &freq, &inherit_stat, &enable_on_exec, &task, &watermark, &precise_ip, &mmap_data, &sample_id_all, &attr.wakeup_events, &attr.bp_type, &attr.bp_addr, &attr.bp_len, &idx)) return -1; /* union... */ if (sample_period != 0) { if (attr.sample_freq != 0) return -1; /* FIXME: throw right exception */ attr.sample_period = sample_period; } /* Bitfields */ attr.disabled = disabled; attr.inherit = inherit; attr.pinned = pinned; attr.exclusive = exclusive; attr.exclude_user = exclude_user; attr.exclude_kernel = exclude_kernel; attr.exclude_hv = exclude_hv; attr.exclude_idle = exclude_idle; attr.mmap = mmap; attr.comm = comm; attr.freq = freq; attr.inherit_stat = inherit_stat; attr.enable_on_exec = enable_on_exec; attr.task = task; attr.watermark = watermark; attr.precise_ip = precise_ip; attr.mmap_data = mmap_data; attr.sample_id_all = sample_id_all; perf_evsel__init(&pevsel->evsel, &attr, idx); return 0; } static void pyrf_evsel__delete(struct pyrf_evsel *pevsel) { perf_evsel__exit(&pevsel->evsel); pevsel->ob_type->tp_free((PyObject*)pevsel); } static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel, PyObject *args, PyObject *kwargs) { struct perf_evsel *evsel = &pevsel->evsel; struct cpu_map *cpus = NULL; struct thread_map *threads = NULL; PyObject *pcpus = NULL, *pthreads = NULL; int group = 0, inherit = 0; static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &pcpus, &pthreads, &group, &inherit)) return NULL; if 
(pthreads != NULL) threads = ((struct pyrf_thread_map *)pthreads)->threads; if (pcpus != NULL) cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; evsel->attr.inherit = inherit; if (perf_evsel__open(evsel, cpus, threads, group) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyMethodDef pyrf_evsel__methods[] = { { .ml_name = "open", .ml_meth = (PyCFunction)pyrf_evsel__open, .ml_flags = METH_VARARGS | METH_KEYWORDS, .ml_doc = PyDoc_STR("open the event selector file descriptor table.") }, { .ml_name = NULL, } }; static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object."); static PyTypeObject pyrf_evsel__type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "perf.evsel", .tp_basicsize = sizeof(struct pyrf_evsel), .tp_dealloc = (destructor)pyrf_evsel__delete, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_doc = pyrf_evsel__doc, .tp_methods = pyrf_evsel__methods, .tp_init = (initproc)pyrf_evsel__init, }; static int pyrf_evsel__setup_types(void) { pyrf_evsel__type.tp_new = PyType_GenericNew; return PyType_Ready(&pyrf_evsel__type); } struct pyrf_evlist { PyObject_HEAD struct perf_evlist evlist; }; static int pyrf_evlist__init(struct pyrf_evlist *pevlist, PyObject *args, PyObject *kwargs __used) { PyObject *pcpus = NULL, *pthreads = NULL; struct cpu_map *cpus; struct thread_map *threads; if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads)) return -1; threads = ((struct pyrf_thread_map *)pthreads)->threads; cpus = ((struct pyrf_cpu_map *)pcpus)->cpus; perf_evlist__init(&pevlist->evlist, cpus, threads); return 0; } static void pyrf_evlist__delete(struct pyrf_evlist *pevlist) { perf_evlist__exit(&pevlist->evlist); pevlist->ob_type->tp_free((PyObject*)pevlist); } static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist, PyObject *args, PyObject *kwargs) { struct perf_evlist *evlist = &pevlist->evlist; static char *kwlist[] = {"pages", "overwrite", NULL, NULL}; int pages = 128, overwrite = false; if 
(!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist, &pages, &overwrite)) return NULL; if (perf_evlist__mmap(evlist, pages, overwrite) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, PyObject *args, PyObject *kwargs) { struct perf_evlist *evlist = &pevlist->evlist; static char *kwlist[] = {"timeout", NULL, NULL}; int timeout = -1, n; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout)) return NULL; n = poll(evlist->pollfd, evlist->nr_fds, timeout); if (n < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } return Py_BuildValue("i", n); } static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, PyObject *args __used, PyObject *kwargs __used) { struct perf_evlist *evlist = &pevlist->evlist; PyObject *list = PyList_New(0); int i; for (i = 0; i < evlist->nr_fds; ++i) { PyObject *file; FILE *fp = fdopen(evlist->pollfd[i].fd, "r"); if (fp == NULL) goto free_list; file = PyFile_FromFile(fp, "perf", "r", NULL); if (file == NULL) goto free_list; if (PyList_Append(list, file) != 0) { Py_DECREF(file); goto free_list; } Py_DECREF(file); } return list; free_list: return PyErr_NoMemory(); } static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, PyObject *args, PyObject *kwargs __used) { struct perf_evlist *evlist = &pevlist->evlist; PyObject *pevsel; struct perf_evsel *evsel; if (!PyArg_ParseTuple(args, "O", &pevsel)) return NULL; Py_INCREF(pevsel); evsel = &((struct pyrf_evsel *)pevsel)->evsel; evsel->idx = evlist->nr_entries; perf_evlist__add(evlist, evsel); return Py_BuildValue("i", evlist->nr_entries); } static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, PyObject *args, PyObject *kwargs) { struct perf_evlist *evlist = &pevlist->evlist; union perf_event *event; int sample_id_all = 1, cpu; static char *kwlist[] = {"cpu", "sample_id_all", NULL, NULL}; int err; if (!PyArg_ParseTupleAndKeywords(args, 
kwargs, "i|i", kwlist, &cpu, &sample_id_all)) return NULL; event = perf_evlist__mmap_read(evlist, cpu); if (event != NULL) { struct perf_evsel *first; PyObject *pyevent = pyrf_event__new(event); struct pyrf_event *pevent = (struct pyrf_event *)pyevent; if (pyevent == NULL) return PyErr_NoMemory(); first = list_entry(evlist->entries.next, struct perf_evsel, node); err = perf_event__parse_sample(event, first->attr.sample_type, perf_evsel__sample_size(first), sample_id_all, &pevent->sample); if (err) return PyErr_Format(PyExc_OSError, "perf: can't parse sample, err=%d", err); return pyevent; } Py_INCREF(Py_None); return Py_None; } static PyMethodDef pyrf_evlist__methods[] = { { .ml_name = "mmap", .ml_meth = (PyCFunction)pyrf_evlist__mmap, .ml_flags = METH_VARARGS | METH_KEYWORDS, .ml_doc = PyDoc_STR("mmap the file descriptor table.") }, { .ml_name = "poll", .ml_meth = (PyCFunction)pyrf_evlist__poll, .ml_flags = METH_VARARGS | METH_KEYWORDS, .ml_doc = PyDoc_STR("poll the file descriptor table.") }, { .ml_name = "get_pollfd", .ml_meth = (PyCFunction)pyrf_evlist__get_pollfd, .ml_flags = METH_VARARGS | METH_KEYWORDS, .ml_doc = PyDoc_STR("get the poll file descriptor table.") }, { .ml_name = "add", .ml_meth = (PyCFunction)pyrf_evlist__add, .ml_flags = METH_VARARGS | METH_KEYWORDS, .ml_doc = PyDoc_STR("adds an event selector to the list.") }, { .ml_name = "read_on_cpu", .ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu, .ml_flags = METH_VARARGS | METH_KEYWORDS, .ml_doc = PyDoc_STR("reads an event.") }, { .ml_name = NULL, } }; static Py_ssize_t pyrf_evlist__length(PyObject *obj) { struct pyrf_evlist *pevlist = (void *)obj; return pevlist->evlist.nr_entries; } static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i) { struct pyrf_evlist *pevlist = (void *)obj; struct perf_evsel *pos; if (i >= pevlist->evlist.nr_entries) return NULL; list_for_each_entry(pos, &pevlist->evlist.entries, node) if (i-- == 0) break; return Py_BuildValue("O", container_of(pos, struct 
pyrf_evsel, evsel)); } static PySequenceMethods pyrf_evlist__sequence_methods = { .sq_length = pyrf_evlist__length, .sq_item = pyrf_evlist__item, }; static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object."); static PyTypeObject pyrf_evlist__type = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "perf.evlist", .tp_basicsize = sizeof(struct pyrf_evlist), .tp_dealloc = (destructor)pyrf_evlist__delete, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, .tp_as_sequence = &pyrf_evlist__sequence_methods, .tp_doc = pyrf_evlist__doc, .tp_methods = pyrf_evlist__methods, .tp_init = (initproc)pyrf_evlist__init, }; static int pyrf_evlist__setup_types(void) { pyrf_evlist__type.tp_new = PyType_GenericNew; return PyType_Ready(&pyrf_evlist__type); } static struct { const char *name; int value; } perf__constants[] = { { "TYPE_HARDWARE", PERF_TYPE_HARDWARE }, { "TYPE_SOFTWARE", PERF_TYPE_SOFTWARE }, { "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT }, { "TYPE_HW_CACHE", PERF_TYPE_HW_CACHE }, { "TYPE_RAW", PERF_TYPE_RAW }, { "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT }, { "COUNT_HW_CPU_CYCLES", PERF_COUNT_HW_CPU_CYCLES }, { "COUNT_HW_INSTRUCTIONS", PERF_COUNT_HW_INSTRUCTIONS }, { "COUNT_HW_CACHE_REFERENCES", PERF_COUNT_HW_CACHE_REFERENCES }, { "COUNT_HW_CACHE_MISSES", PERF_COUNT_HW_CACHE_MISSES }, { "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS }, { "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES }, { "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES }, { "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D }, { "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I }, { "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL }, { "COUNT_HW_CACHE_DTLB", PERF_COUNT_HW_CACHE_DTLB }, { "COUNT_HW_CACHE_ITLB", PERF_COUNT_HW_CACHE_ITLB }, { "COUNT_HW_CACHE_BPU", PERF_COUNT_HW_CACHE_BPU }, { "COUNT_HW_CACHE_OP_READ", PERF_COUNT_HW_CACHE_OP_READ }, { "COUNT_HW_CACHE_OP_WRITE", PERF_COUNT_HW_CACHE_OP_WRITE }, { "COUNT_HW_CACHE_OP_PREFETCH", PERF_COUNT_HW_CACHE_OP_PREFETCH }, { 
"COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS }, { "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS }, { "COUNT_HW_STALLED_CYCLES_FRONTEND", PERF_COUNT_HW_STALLED_CYCLES_FRONTEND }, { "COUNT_HW_STALLED_CYCLES_BACKEND", PERF_COUNT_HW_STALLED_CYCLES_BACKEND }, { "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK }, { "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK }, { "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS }, { "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES }, { "COUNT_SW_CPU_MIGRATIONS", PERF_COUNT_SW_CPU_MIGRATIONS }, { "COUNT_SW_PAGE_FAULTS_MIN", PERF_COUNT_SW_PAGE_FAULTS_MIN }, { "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ }, { "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS }, { "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS }, { "SAMPLE_IP", PERF_SAMPLE_IP }, { "SAMPLE_TID", PERF_SAMPLE_TID }, { "SAMPLE_TIME", PERF_SAMPLE_TIME }, { "SAMPLE_ADDR", PERF_SAMPLE_ADDR }, { "SAMPLE_READ", PERF_SAMPLE_READ }, { "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN }, { "SAMPLE_ID", PERF_SAMPLE_ID }, { "SAMPLE_CPU", PERF_SAMPLE_CPU }, { "SAMPLE_PERIOD", PERF_SAMPLE_PERIOD }, { "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID }, { "SAMPLE_RAW", PERF_SAMPLE_RAW }, { "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED }, { "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING }, { "FORMAT_ID", PERF_FORMAT_ID }, { "FORMAT_GROUP", PERF_FORMAT_GROUP }, { "RECORD_MMAP", PERF_RECORD_MMAP }, { "RECORD_LOST", PERF_RECORD_LOST }, { "RECORD_COMM", PERF_RECORD_COMM }, { "RECORD_EXIT", PERF_RECORD_EXIT }, { "RECORD_THROTTLE", PERF_RECORD_THROTTLE }, { "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE }, { "RECORD_FORK", PERF_RECORD_FORK }, { "RECORD_READ", PERF_RECORD_READ }, { "RECORD_SAMPLE", PERF_RECORD_SAMPLE }, { .name = NULL, }, }; static PyMethodDef perf__methods[] = { { .ml_name = NULL, } }; PyMODINIT_FUNC initperf(void) { PyObject *obj; int i; PyObject *dict, *module = Py_InitModule("perf", 
perf__methods); if (module == NULL || pyrf_event__setup_types() < 0 || pyrf_evlist__setup_types() < 0 || pyrf_evsel__setup_types() < 0 || pyrf_thread_map__setup_types() < 0 || pyrf_cpu_map__setup_types() < 0) return; Py_INCREF(&pyrf_evlist__type); PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type); Py_INCREF(&pyrf_evsel__type); PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type); Py_INCREF(&pyrf_thread_map__type); PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type); Py_INCREF(&pyrf_cpu_map__type); PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type); dict = PyModule_GetDict(module); if (dict == NULL) goto error; for (i = 0; perf__constants[i].name != NULL; i++) { obj = PyInt_FromLong(perf__constants[i].value); if (obj == NULL) goto error; PyDict_SetItemString(dict, perf__constants[i].name, obj); Py_DECREF(obj); } error: if (PyErr_Occurred()) PyErr_SetString(PyExc_ImportError, "perf: Init failed!"); }
gpl-2.0
bayger/kernel_amlogic
tools/perf/util/probe-finder.c
2393
51454
/* * probe-finder.c : C expression to kprobe event converter * * Written by Masami Hiramatsu <mhiramat@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <sys/utsname.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <errno.h> #include <stdio.h> #include <unistd.h> #include <getopt.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <ctype.h> #include <dwarf-regs.h> #include <linux/bitops.h> #include "event.h" #include "debug.h" #include "util.h" #include "symbol.h" #include "probe-finder.h" /* Kprobe tracer basic type is up to u64 */ #define MAX_BASIC_TYPE_BITS 64 /* * Compare the tail of two strings. * Return 0 if whole of either string is same as another's tail part. 
*/ static int strtailcmp(const char *s1, const char *s2) { int i1 = strlen(s1); int i2 = strlen(s2); while (--i1 >= 0 && --i2 >= 0) { if (s1[i1] != s2[i2]) return s1[i1] - s2[i2]; } return 0; } /* Line number list operations */ /* Add a line to line number list */ static int line_list__add_line(struct list_head *head, int line) { struct line_node *ln; struct list_head *p; /* Reverse search, because new line will be the last one */ list_for_each_entry_reverse(ln, head, list) { if (ln->line < line) { p = &ln->list; goto found; } else if (ln->line == line) /* Already exist */ return 1; } /* List is empty, or the smallest entry */ p = head; found: pr_debug("line list: add a line %u\n", line); ln = zalloc(sizeof(struct line_node)); if (ln == NULL) return -ENOMEM; ln->line = line; INIT_LIST_HEAD(&ln->list); list_add(&ln->list, p); return 0; } /* Check if the line in line number list */ static int line_list__has_line(struct list_head *head, int line) { struct line_node *ln; /* Reverse search, because new line will be the last one */ list_for_each_entry(ln, head, list) if (ln->line == line) return 1; return 0; } /* Init line number list */ static void line_list__init(struct list_head *head) { INIT_LIST_HEAD(head); } /* Free line number list */ static void line_list__free(struct list_head *head) { struct line_node *ln; while (!list_empty(head)) { ln = list_first_entry(head, struct line_node, list); list_del(&ln->list); free(ln); } } /* Dwarf FL wrappers */ static char *debuginfo_path; /* Currently dummy */ static const Dwfl_Callbacks offline_callbacks = { .find_debuginfo = dwfl_standard_find_debuginfo, .debuginfo_path = &debuginfo_path, .section_address = dwfl_offline_section_address, /* We use this table for core files too. 
*/ .find_elf = dwfl_build_id_find_elf, }; /* Get a Dwarf from offline image */ static Dwarf *dwfl_init_offline_dwarf(int fd, Dwfl **dwflp, Dwarf_Addr *bias) { Dwfl_Module *mod; Dwarf *dbg = NULL; if (!dwflp) return NULL; *dwflp = dwfl_begin(&offline_callbacks); if (!*dwflp) return NULL; mod = dwfl_report_offline(*dwflp, "", "", fd); if (!mod) goto error; dbg = dwfl_module_getdwarf(mod, bias); if (!dbg) { error: dwfl_end(*dwflp); *dwflp = NULL; } return dbg; } #if _ELFUTILS_PREREQ(0, 148) /* This method is buggy if elfutils is older than 0.148 */ static int __linux_kernel_find_elf(Dwfl_Module *mod, void **userdata, const char *module_name, Dwarf_Addr base, char **file_name, Elf **elfp) { int fd; const char *path = kernel_get_module_path(module_name); pr_debug2("Use file %s for %s\n", path, module_name); if (path) { fd = open(path, O_RDONLY); if (fd >= 0) { *file_name = strdup(path); return fd; } } /* If failed, try to call standard method */ return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base, file_name, elfp); } static const Dwfl_Callbacks kernel_callbacks = { .find_debuginfo = dwfl_standard_find_debuginfo, .debuginfo_path = &debuginfo_path, .find_elf = __linux_kernel_find_elf, .section_address = dwfl_linux_kernel_module_section_address, }; /* Get a Dwarf from live kernel image */ static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr, Dwfl **dwflp, Dwarf_Addr *bias) { Dwarf *dbg; if (!dwflp) return NULL; *dwflp = dwfl_begin(&kernel_callbacks); if (!*dwflp) return NULL; /* Load the kernel dwarves: Don't care the result here */ dwfl_linux_kernel_report_kernel(*dwflp); dwfl_linux_kernel_report_modules(*dwflp); dbg = dwfl_addrdwarf(*dwflp, addr, bias); /* Here, check whether we could get a real dwarf */ if (!dbg) { pr_debug("Failed to find kernel dwarf at %lx\n", (unsigned long)addr); dwfl_end(*dwflp); *dwflp = NULL; } return dbg; } #else /* With older elfutils, this just support kernel module... 
*/ static Dwarf *dwfl_init_live_kernel_dwarf(Dwarf_Addr addr __used, Dwfl **dwflp, Dwarf_Addr *bias) { int fd; const char *path = kernel_get_module_path("kernel"); if (!path) { pr_err("Failed to find vmlinux path\n"); return NULL; } pr_debug2("Use file %s for debuginfo\n", path); fd = open(path, O_RDONLY); if (fd < 0) return NULL; return dwfl_init_offline_dwarf(fd, dwflp, bias); } #endif /* Dwarf wrappers */ /* Find the realpath of the target file. */ static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname) { Dwarf_Files *files; size_t nfiles, i; const char *src = NULL; int ret; if (!fname) return NULL; ret = dwarf_getsrcfiles(cu_die, &files, &nfiles); if (ret != 0) return NULL; for (i = 0; i < nfiles; i++) { src = dwarf_filesrc(files, i, NULL, NULL); if (strtailcmp(src, fname) == 0) break; } if (i == nfiles) return NULL; return src; } /* Get DW_AT_comp_dir (should be NULL with older gcc) */ static const char *cu_get_comp_dir(Dwarf_Die *cu_die) { Dwarf_Attribute attr; if (dwarf_attr(cu_die, DW_AT_comp_dir, &attr) == NULL) return NULL; return dwarf_formstring(&attr); } /* Get a line number and file name for given address */ static int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, const char **fname, int *lineno) { Dwarf_Line *line; Dwarf_Addr laddr; line = dwarf_getsrc_die(cudie, (Dwarf_Addr)addr); if (line && dwarf_lineaddr(line, &laddr) == 0 && addr == (unsigned long)laddr && dwarf_lineno(line, lineno) == 0) { *fname = dwarf_linesrc(line, NULL, NULL); if (!*fname) /* line number is useless without filename */ *lineno = 0; } return *lineno ?: -ENOENT; } /* Compare diename and tname */ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname) { const char *name; name = dwarf_diename(dw_die); return name ? 
(strcmp(tname, name) == 0) : false; } /* Get callsite line number of inline-function instance */ static int die_get_call_lineno(Dwarf_Die *in_die) { Dwarf_Attribute attr; Dwarf_Word ret; if (!dwarf_attr(in_die, DW_AT_call_line, &attr)) return -ENOENT; dwarf_formudata(&attr, &ret); return (int)ret; } /* Get type die */ static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) { Dwarf_Attribute attr; if (dwarf_attr_integrate(vr_die, DW_AT_type, &attr) && dwarf_formref_die(&attr, die_mem)) return die_mem; else return NULL; } /* Get a type die, but skip qualifiers */ static Dwarf_Die *__die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) { int tag; do { vr_die = die_get_type(vr_die, die_mem); if (!vr_die) break; tag = dwarf_tag(vr_die); } while (tag == DW_TAG_const_type || tag == DW_TAG_restrict_type || tag == DW_TAG_volatile_type || tag == DW_TAG_shared_type); return vr_die; } /* Get a type die, but skip qualifiers and typedef */ static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem) { do { vr_die = __die_get_real_type(vr_die, die_mem); } while (vr_die && dwarf_tag(vr_die) == DW_TAG_typedef); return vr_die; } static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name, Dwarf_Word *result) { Dwarf_Attribute attr; if (dwarf_attr(tp_die, attr_name, &attr) == NULL || dwarf_formudata(&attr, result) != 0) return -ENOENT; return 0; } static bool die_is_signed_type(Dwarf_Die *tp_die) { Dwarf_Word ret; if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret)) return false; return (ret == DW_ATE_signed_char || ret == DW_ATE_signed || ret == DW_ATE_signed_fixed); } static int die_get_byte_size(Dwarf_Die *tp_die) { Dwarf_Word ret; if (die_get_attr_udata(tp_die, DW_AT_byte_size, &ret)) return 0; return (int)ret; } static int die_get_bit_size(Dwarf_Die *tp_die) { Dwarf_Word ret; if (die_get_attr_udata(tp_die, DW_AT_bit_size, &ret)) return 0; return (int)ret; } static int die_get_bit_offset(Dwarf_Die *tp_die) { Dwarf_Word ret; if 
	    (die_get_attr_udata(tp_die, DW_AT_bit_offset, &ret))
		return 0;

	return (int)ret;
}

/* Get data_member_location offset */
static int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs)
{
	Dwarf_Attribute attr;
	Dwarf_Op *expr;
	size_t nexpr;
	int ret;

	if (dwarf_attr(mb_die, DW_AT_data_member_location, &attr) == NULL)
		return -ENOENT;
	if (dwarf_formudata(&attr, offs) != 0) {
		/* DW_AT_data_member_location should be DW_OP_plus_uconst */
		ret = dwarf_getlocation(&attr, &expr, &nexpr);
		if (ret < 0 || nexpr == 0)
			return -ENOENT;
		if (expr[0].atom != DW_OP_plus_uconst || nexpr != 1) {
			pr_debug("Unable to get offset:Unexpected OP %x (%zd)\n",
				 expr[0].atom, nexpr);
			return -ENOTSUP;
		}
		*offs = (Dwarf_Word)expr[0].number;
	}
	return 0;
}

/* Return values for die_find callbacks */
enum {
	DIE_FIND_CB_FOUND = 0,		/* End of Search */
	DIE_FIND_CB_CHILD = 1,		/* Search only children */
	DIE_FIND_CB_SIBLING = 2,	/* Search only siblings */
	DIE_FIND_CB_CONTINUE = 3,	/* Search children and siblings */
};

/*
 * Search a child die.  The callback steers the walk via the
 * DIE_FIND_CB_* values above (CHILD/SIBLING are bit flags); the first
 * FOUND die is copied into die_mem and returned, otherwise NULL.
 */
static Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
				 int (*callback)(Dwarf_Die *, void *),
				 void *data, Dwarf_Die *die_mem)
{
	Dwarf_Die child_die;
	int ret;

	ret = dwarf_child(rt_die, die_mem);
	if (ret != 0)
		return NULL;

	do {
		ret = callback(die_mem, data);
		if (ret == DIE_FIND_CB_FOUND)
			return die_mem;

		/* Recurse into children when the callback asks for it */
		if ((ret & DIE_FIND_CB_CHILD) &&
		    die_find_child(die_mem, callback, data, &child_die)) {
			memcpy(die_mem, &child_die, sizeof(Dwarf_Die));
			return die_mem;
		}
	} while ((ret & DIE_FIND_CB_SIBLING) &&
		 dwarf_siblingof(die_mem, die_mem) == 0);

	return NULL;
}

struct __addr_die_search_param {
	Dwarf_Addr	addr;		/* target address */
	Dwarf_Die	*die_mem;	/* out: matching subprogram DIE */
};

/* dwarf_getfuncs callback: grab the subprogram covering ad->addr */
static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
{
	struct __addr_die_search_param *ad = data;

	if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
	    dwarf_haspc(fn_die, ad->addr)) {
		memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
		return DWARF_CB_ABORT;
	}
	return DWARF_CB_OK;
}

/* Search a real subprogram including this line, */
static Dwarf_Die
*die_find_real_subprogram(Dwarf_Die *cu_die, Dwarf_Addr addr,
			  Dwarf_Die *die_mem)
{
	struct __addr_die_search_param ad;

	ad.addr = addr;
	ad.die_mem = die_mem;
	/* dwarf_getscopes can't find subprogram. */
	if (!dwarf_getfuncs(cu_die, __die_search_func_cb, &ad, 0))
		return NULL;
	else
		return die_mem;
}

/* die_find callback for inline function search */
static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
{
	Dwarf_Addr *addr = data;

	if (dwarf_tag(die_mem) == DW_TAG_inlined_subroutine &&
	    dwarf_haspc(die_mem, *addr))
		return DIE_FIND_CB_FOUND;

	return DIE_FIND_CB_CONTINUE;
}

/* Similar to dwarf_getfuncs, but returns inlined_subroutine if exists. */
static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
				      Dwarf_Die *die_mem)
{
	Dwarf_Die tmp_die;

	sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr, &tmp_die);
	if (!sp_die)
		return NULL;

	/* Inlined function could be recursive. Trace it until fail */
	while (sp_die) {
		memcpy(die_mem, sp_die, sizeof(Dwarf_Die));
		sp_die = die_find_child(sp_die, __die_find_inline_cb, &addr,
					&tmp_die);
	}

	return die_mem;
}

/* Walker on lines (Note: line number will not be sorted) */
typedef int (* line_walk_handler_t) (const char *fname, int lineno,
				     Dwarf_Addr addr, void *data);

struct __line_walk_param {
	const char *fname;		/* file name passed to handler */
	line_walk_handler_t handler;	/* per-line callback */
	void *data;			/* opaque handler argument */
	int retval;			/* last handler return value */
};

/* Report the call-site line of each inlined_subroutine to the handler */
static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
{
	struct __line_walk_param *lw = data;
	Dwarf_Addr addr;
	int lineno;

	if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
		lineno = die_get_call_lineno(in_die);
		if (lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
			lw->retval = lw->handler(lw->fname, lineno, addr,
						 lw->data);
			if (lw->retval != 0)
				return DIE_FIND_CB_FOUND;
		}
	}
	return DIE_FIND_CB_SIBLING;
}

/* Walk on lines of blocks included in given DIE */
static int __die_walk_funclines(Dwarf_Die *sp_die,
				line_walk_handler_t handler, void *data)
{
	struct __line_walk_param lw = {
		.handler = handler,
		.data = data,
		.retval = 0,
	};
Dwarf_Die die_mem; Dwarf_Addr addr; int lineno; /* Handle function declaration line */ lw.fname = dwarf_decl_file(sp_die); if (lw.fname && dwarf_decl_line(sp_die, &lineno) == 0 && dwarf_entrypc(sp_die, &addr) == 0) { lw.retval = handler(lw.fname, lineno, addr, data); if (lw.retval != 0) goto done; } die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem); done: return lw.retval; } static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data) { struct __line_walk_param *lw = data; lw->retval = __die_walk_funclines(sp_die, lw->handler, lw->data); if (lw->retval != 0) return DWARF_CB_ABORT; return DWARF_CB_OK; } /* * Walk on lines inside given PDIE. If the PDIE is subprogram, walk only on * the lines inside the subprogram, otherwise PDIE must be a CU DIE. */ static int die_walk_lines(Dwarf_Die *pdie, line_walk_handler_t handler, void *data) { Dwarf_Lines *lines; Dwarf_Line *line; Dwarf_Addr addr; const char *fname; int lineno, ret = 0; Dwarf_Die die_mem, *cu_die; size_t nlines, i; /* Get the CU die */ if (dwarf_tag(pdie) == DW_TAG_subprogram) cu_die = dwarf_diecu(pdie, &die_mem, NULL, NULL); else cu_die = pdie; if (!cu_die) { pr_debug2("Failed to get CU from subprogram\n"); return -EINVAL; } /* Get lines list in the CU */ if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) { pr_debug2("Failed to get source lines on this CU.\n"); return -ENOENT; } pr_debug2("Get %zd lines from this CU\n", nlines); /* Walk on the lines on lines list */ for (i = 0; i < nlines; i++) { line = dwarf_onesrcline(lines, i); if (line == NULL || dwarf_lineno(line, &lineno) != 0 || dwarf_lineaddr(line, &addr) != 0) { pr_debug2("Failed to get line info. " "Possible error in debuginfo.\n"); continue; } /* Filter lines based on address */ if (pdie != cu_die) /* * Address filtering * The line is included in given function, and * no inline block includes it. 
*/ if (!dwarf_haspc(pdie, addr) || die_find_inlinefunc(pdie, addr, &die_mem)) continue; /* Get source line */ fname = dwarf_linesrc(line, NULL, NULL); ret = handler(fname, lineno, addr, data); if (ret != 0) return ret; } /* * Dwarf lines doesn't include function declarations and inlined * subroutines. We have to check functions list or given function. */ if (pdie != cu_die) ret = __die_walk_funclines(pdie, handler, data); else { struct __line_walk_param param = { .handler = handler, .data = data, .retval = 0, }; dwarf_getfuncs(cu_die, __die_walk_culines_cb, &param, 0); ret = param.retval; } return ret; } struct __find_variable_param { const char *name; Dwarf_Addr addr; }; static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) { struct __find_variable_param *fvp = data; int tag; tag = dwarf_tag(die_mem); if ((tag == DW_TAG_formal_parameter || tag == DW_TAG_variable) && die_compare_name(die_mem, fvp->name)) return DIE_FIND_CB_FOUND; if (dwarf_haspc(die_mem, fvp->addr)) return DIE_FIND_CB_CONTINUE; else return DIE_FIND_CB_SIBLING; } /* Find a variable called 'name' at given address */ static Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name, Dwarf_Addr addr, Dwarf_Die *die_mem) { struct __find_variable_param fvp = { .name = name, .addr = addr}; return die_find_child(sp_die, __die_find_variable_cb, (void *)&fvp, die_mem); } static int __die_find_member_cb(Dwarf_Die *die_mem, void *data) { const char *name = data; if ((dwarf_tag(die_mem) == DW_TAG_member) && die_compare_name(die_mem, name)) return DIE_FIND_CB_FOUND; return DIE_FIND_CB_SIBLING; } /* Find a member called 'name' */ static Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name, Dwarf_Die *die_mem) { return die_find_child(st_die, __die_find_member_cb, (void *)name, die_mem); } /* Get the name of given variable DIE */ static int die_get_typename(Dwarf_Die *vr_die, char *buf, int len) { Dwarf_Die type; int tag, ret, ret2; const char *tmp = ""; if 
(__die_get_real_type(vr_die, &type) == NULL) return -ENOENT; tag = dwarf_tag(&type); if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type) tmp = "*"; else if (tag == DW_TAG_subroutine_type) { /* Function pointer */ ret = snprintf(buf, len, "(function_type)"); return (ret >= len) ? -E2BIG : ret; } else { if (!dwarf_diename(&type)) return -ENOENT; if (tag == DW_TAG_union_type) tmp = "union "; else if (tag == DW_TAG_structure_type) tmp = "struct "; /* Write a base name */ ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type)); return (ret >= len) ? -E2BIG : ret; } ret = die_get_typename(&type, buf, len); if (ret > 0) { ret2 = snprintf(buf + ret, len - ret, "%s", tmp); ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret; } return ret; } /* Get the name and type of given variable DIE, stored as "type\tname" */ static int die_get_varname(Dwarf_Die *vr_die, char *buf, int len) { int ret, ret2; ret = die_get_typename(vr_die, buf, len); if (ret < 0) { pr_debug("Failed to get type, make it unknown.\n"); ret = snprintf(buf, len, "(unknown_type)"); } if (ret > 0) { ret2 = snprintf(buf + ret, len - ret, "\t%s", dwarf_diename(vr_die)); ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret; } return ret; } /* * Probe finder related functions */ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs) { struct probe_trace_arg_ref *ref; ref = zalloc(sizeof(struct probe_trace_arg_ref)); if (ref != NULL) ref->offset = offs; return ref; } /* * Convert a location into trace_arg. * If tvar == NULL, this just checks variable can be converted. 
*/ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, Dwarf_Op *fb_ops, struct probe_trace_arg *tvar) { Dwarf_Attribute attr; Dwarf_Op *op; size_t nops; unsigned int regn; Dwarf_Word offs = 0; bool ref = false; const char *regs; int ret; if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL) goto static_var; /* TODO: handle more than 1 exprs */ if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL || dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0 || nops == 0) { /* TODO: Support const_value */ return -ENOENT; } if (op->atom == DW_OP_addr) { static_var: if (!tvar) return 0; /* Static variables on memory (not stack), make @varname */ ret = strlen(dwarf_diename(vr_die)); tvar->value = zalloc(ret + 2); if (tvar->value == NULL) return -ENOMEM; snprintf(tvar->value, ret + 2, "@%s", dwarf_diename(vr_die)); tvar->ref = alloc_trace_arg_ref((long)offs); if (tvar->ref == NULL) return -ENOMEM; return 0; } /* If this is based on frame buffer, set the offset */ if (op->atom == DW_OP_fbreg) { if (fb_ops == NULL) return -ENOTSUP; ref = true; offs = op->number; op = &fb_ops[0]; } if (op->atom >= DW_OP_breg0 && op->atom <= DW_OP_breg31) { regn = op->atom - DW_OP_breg0; offs += op->number; ref = true; } else if (op->atom >= DW_OP_reg0 && op->atom <= DW_OP_reg31) { regn = op->atom - DW_OP_reg0; } else if (op->atom == DW_OP_bregx) { regn = op->number; offs += op->number2; ref = true; } else if (op->atom == DW_OP_regx) { regn = op->number; } else { pr_debug("DW_OP %x is not supported.\n", op->atom); return -ENOTSUP; } if (!tvar) return 0; regs = get_arch_regstr(regn); if (!regs) { /* This should be a bug in DWARF or this tool */ pr_warning("Mapping for the register number %u " "missing on this architecture.\n", regn); return -ERANGE; } tvar->value = strdup(regs); if (tvar->value == NULL) return -ENOMEM; if (ref) { tvar->ref = alloc_trace_arg_ref((long)offs); if (tvar->ref == NULL) return -ENOMEM; } return 0; } #define BYTES_TO_BITS(nb) ((nb) * 
BITS_PER_LONG / sizeof(long)) static int convert_variable_type(Dwarf_Die *vr_die, struct probe_trace_arg *tvar, const char *cast) { struct probe_trace_arg_ref **ref_ptr = &tvar->ref; Dwarf_Die type; char buf[16]; int ret; /* TODO: check all types */ if (cast && strcmp(cast, "string") != 0) { /* Non string type is OK */ tvar->type = strdup(cast); return (tvar->type == NULL) ? -ENOMEM : 0; } if (die_get_bit_size(vr_die) != 0) { /* This is a bitfield */ ret = snprintf(buf, 16, "b%d@%d/%zd", die_get_bit_size(vr_die), die_get_bit_offset(vr_die), BYTES_TO_BITS(die_get_byte_size(vr_die))); goto formatted; } if (die_get_real_type(vr_die, &type) == NULL) { pr_warning("Failed to get a type information of %s.\n", dwarf_diename(vr_die)); return -ENOENT; } pr_debug("%s type is %s.\n", dwarf_diename(vr_die), dwarf_diename(&type)); if (cast && strcmp(cast, "string") == 0) { /* String type */ ret = dwarf_tag(&type); if (ret != DW_TAG_pointer_type && ret != DW_TAG_array_type) { pr_warning("Failed to cast into string: " "%s(%s) is not a pointer nor array.\n", dwarf_diename(vr_die), dwarf_diename(&type)); return -EINVAL; } if (ret == DW_TAG_pointer_type) { if (die_get_real_type(&type, &type) == NULL) { pr_warning("Failed to get a type" " information.\n"); return -ENOENT; } while (*ref_ptr) ref_ptr = &(*ref_ptr)->next; /* Add new reference with offset +0 */ *ref_ptr = zalloc(sizeof(struct probe_trace_arg_ref)); if (*ref_ptr == NULL) { pr_warning("Out of memory error\n"); return -ENOMEM; } } if (!die_compare_name(&type, "char") && !die_compare_name(&type, "unsigned char")) { pr_warning("Failed to cast into string: " "%s is not (unsigned) char *.\n", dwarf_diename(vr_die)); return -EINVAL; } tvar->type = strdup(cast); return (tvar->type == NULL) ? -ENOMEM : 0; } ret = BYTES_TO_BITS(die_get_byte_size(&type)); if (!ret) /* No size ... try to use default type */ return 0; /* Check the bitwidth */ if (ret > MAX_BASIC_TYPE_BITS) { pr_info("%s exceeds max-bitwidth. 
Cut down to %d bits.\n", dwarf_diename(&type), MAX_BASIC_TYPE_BITS); ret = MAX_BASIC_TYPE_BITS; } ret = snprintf(buf, 16, "%c%d", die_is_signed_type(&type) ? 's' : 'u', ret); formatted: if (ret < 0 || ret >= 16) { if (ret >= 16) ret = -E2BIG; pr_warning("Failed to convert variable type: %s\n", strerror(-ret)); return ret; } tvar->type = strdup(buf); if (tvar->type == NULL) return -ENOMEM; return 0; } static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, struct perf_probe_arg_field *field, struct probe_trace_arg_ref **ref_ptr, Dwarf_Die *die_mem) { struct probe_trace_arg_ref *ref = *ref_ptr; Dwarf_Die type; Dwarf_Word offs; int ret, tag; pr_debug("converting %s in %s\n", field->name, varname); if (die_get_real_type(vr_die, &type) == NULL) { pr_warning("Failed to get the type of %s.\n", varname); return -ENOENT; } pr_debug2("Var real type: (%x)\n", (unsigned)dwarf_dieoffset(&type)); tag = dwarf_tag(&type); if (field->name[0] == '[' && (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)) { if (field->next) /* Save original type for next field */ memcpy(die_mem, &type, sizeof(*die_mem)); /* Get the type of this array */ if (die_get_real_type(&type, &type) == NULL) { pr_warning("Failed to get the type of %s.\n", varname); return -ENOENT; } pr_debug2("Array real type: (%x)\n", (unsigned)dwarf_dieoffset(&type)); if (tag == DW_TAG_pointer_type) { ref = zalloc(sizeof(struct probe_trace_arg_ref)); if (ref == NULL) return -ENOMEM; if (*ref_ptr) (*ref_ptr)->next = ref; else *ref_ptr = ref; } ref->offset += die_get_byte_size(&type) * field->index; if (!field->next) /* Save vr_die for converting types */ memcpy(die_mem, vr_die, sizeof(*die_mem)); goto next; } else if (tag == DW_TAG_pointer_type) { /* Check the pointer and dereference */ if (!field->ref) { pr_err("Semantic error: %s must be referred by '->'\n", field->name); return -EINVAL; } /* Get the type pointed by this pointer */ if (die_get_real_type(&type, &type) == NULL) { pr_warning("Failed to 
get the type of %s.\n", varname); return -ENOENT; } /* Verify it is a data structure */ if (dwarf_tag(&type) != DW_TAG_structure_type) { pr_warning("%s is not a data structure.\n", varname); return -EINVAL; } ref = zalloc(sizeof(struct probe_trace_arg_ref)); if (ref == NULL) return -ENOMEM; if (*ref_ptr) (*ref_ptr)->next = ref; else *ref_ptr = ref; } else { /* Verify it is a data structure */ if (tag != DW_TAG_structure_type) { pr_warning("%s is not a data structure.\n", varname); return -EINVAL; } if (field->name[0] == '[') { pr_err("Semantic error: %s is not a pointor" " nor array.\n", varname); return -EINVAL; } if (field->ref) { pr_err("Semantic error: %s must be referred by '.'\n", field->name); return -EINVAL; } if (!ref) { pr_warning("Structure on a register is not " "supported yet.\n"); return -ENOTSUP; } } if (die_find_member(&type, field->name, die_mem) == NULL) { pr_warning("%s(tyep:%s) has no member %s.\n", varname, dwarf_diename(&type), field->name); return -EINVAL; } /* Get the offset of the field */ ret = die_get_data_member_location(die_mem, &offs); if (ret < 0) { pr_warning("Failed to get the offset of %s.\n", field->name); return ret; } ref->offset += (long)offs; next: /* Converting next field */ if (field->next) return convert_variable_fields(die_mem, field->name, field->next, &ref, die_mem); else return 0; } /* Show a variables in kprobe event format */ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) { Dwarf_Die die_mem; int ret; pr_debug("Converting variable %s into trace event.\n", dwarf_diename(vr_die)); ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops, pf->tvar); if (ret == -ENOENT) pr_err("Failed to find the location of %s at this address.\n" " Perhaps, it has been optimized out.\n", pf->pvar->var); else if (ret == -ENOTSUP) pr_err("Sorry, we don't support this variable location yet.\n"); else if (pf->pvar->field) { ret = convert_variable_fields(vr_die, pf->pvar->var, pf->pvar->field, &pf->tvar->ref, 
&die_mem); vr_die = &die_mem; } if (ret == 0) ret = convert_variable_type(vr_die, pf->tvar, pf->pvar->type); /* *expr will be cached in libdw. Don't free it. */ return ret; } /* Find a variable in a subprogram die */ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) { Dwarf_Die vr_die, *scopes; char buf[32], *ptr; int ret, nscopes; if (!is_c_varname(pf->pvar->var)) { /* Copy raw parameters */ pf->tvar->value = strdup(pf->pvar->var); if (pf->tvar->value == NULL) return -ENOMEM; if (pf->pvar->type) { pf->tvar->type = strdup(pf->pvar->type); if (pf->tvar->type == NULL) return -ENOMEM; } if (pf->pvar->name) { pf->tvar->name = strdup(pf->pvar->name); if (pf->tvar->name == NULL) return -ENOMEM; } else pf->tvar->name = NULL; return 0; } if (pf->pvar->name) pf->tvar->name = strdup(pf->pvar->name); else { ret = synthesize_perf_probe_arg(pf->pvar, buf, 32); if (ret < 0) return ret; ptr = strchr(buf, ':'); /* Change type separator to _ */ if (ptr) *ptr = '_'; pf->tvar->name = strdup(buf); } if (pf->tvar->name == NULL) return -ENOMEM; pr_debug("Searching '%s' variable in context.\n", pf->pvar->var); /* Search child die for local variables and parameters. 
*/ if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die)) ret = convert_variable(&vr_die, pf); else { /* Search upper class */ nscopes = dwarf_getscopes_die(sp_die, &scopes); while (nscopes-- > 1) { pr_debug("Searching variables in %s\n", dwarf_diename(&scopes[nscopes])); /* We should check this scope, so give dummy address */ if (die_find_variable_at(&scopes[nscopes], pf->pvar->var, 0, &vr_die)) { ret = convert_variable(&vr_die, pf); goto found; } } if (scopes) free(scopes); ret = -ENOENT; } found: if (ret < 0) pr_warning("Failed to find '%s' in this function.\n", pf->pvar->var); return ret; } /* Convert subprogram DIE to trace point */ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, bool retprobe, struct probe_trace_point *tp) { Dwarf_Addr eaddr; const char *name; /* Copy the name of probe point */ name = dwarf_diename(sp_die); if (name) { if (dwarf_entrypc(sp_die, &eaddr) != 0) { pr_warning("Failed to get entry address of %s\n", dwarf_diename(sp_die)); return -ENOENT; } tp->symbol = strdup(name); if (tp->symbol == NULL) return -ENOMEM; tp->offset = (unsigned long)(paddr - eaddr); } else /* This function has no name. 
*/ tp->offset = (unsigned long)paddr; /* Return probe must be on the head of a subprogram */ if (retprobe) { if (eaddr != paddr) { pr_warning("Return probe must be on the head of" " a real function.\n"); return -EINVAL; } tp->retprobe = true; } return 0; } /* Call probe_finder callback with real subprogram DIE */ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) { Dwarf_Die die_mem; Dwarf_Attribute fb_attr; size_t nops; int ret; /* If no real subprogram, find a real one */ if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) { sp_die = die_find_real_subprogram(&pf->cu_die, pf->addr, &die_mem); if (!sp_die) { pr_warning("Failed to find probe point in any " "functions.\n"); return -ENOENT; } } /* Get the frame base attribute/ops */ dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr); ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); if (ret <= 0 || nops == 0) { pf->fb_ops = NULL; #if _ELFUTILS_PREREQ(0, 142) } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa && pf->cfi != NULL) { Dwarf_Frame *frame; if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { pr_warning("Failed to get call frame on 0x%jx\n", (uintmax_t)pf->addr); return -ENOENT; } #endif } /* Call finder's callback handler */ ret = pf->callback(sp_die, pf); /* *pf->fb_ops will be cached in libdw. Don't free it. */ pf->fb_ops = NULL; return ret; } static int probe_point_line_walker(const char *fname, int lineno, Dwarf_Addr addr, void *data) { struct probe_finder *pf = data; int ret; if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0) return 0; pf->addr = addr; ret = call_probe_finder(NULL, pf); /* Continue if no error, because the line will be in inline function */ return ret < 0 ? 
ret : 0; } /* Find probe point from its line number */ static int find_probe_point_by_line(struct probe_finder *pf) { return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf); } /* Find lines which match lazy pattern */ static int find_lazy_match_lines(struct list_head *head, const char *fname, const char *pat) { FILE *fp; char *line = NULL; size_t line_len; ssize_t len; int count = 0, linenum = 1; fp = fopen(fname, "r"); if (!fp) { pr_warning("Failed to open %s: %s\n", fname, strerror(errno)); return -errno; } while ((len = getline(&line, &line_len, fp)) > 0) { if (line[len - 1] == '\n') line[len - 1] = '\0'; if (strlazymatch(line, pat)) { line_list__add_line(head, linenum); count++; } linenum++; } if (ferror(fp)) count = -errno; free(line); fclose(fp); if (count == 0) pr_debug("No matched lines found in %s.\n", fname); return count; } static int probe_point_lazy_walker(const char *fname, int lineno, Dwarf_Addr addr, void *data) { struct probe_finder *pf = data; int ret; if (!line_list__has_line(&pf->lcache, lineno) || strtailcmp(fname, pf->fname) != 0) return 0; pr_debug("Probe line found: line:%d addr:0x%llx\n", lineno, (unsigned long long)addr); pf->addr = addr; ret = call_probe_finder(NULL, pf); /* * Continue if no error, because the lazy pattern will match * to other lines */ return ret < 0 ? 
ret : 0; } /* Find probe points from lazy pattern */ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) { int ret = 0; if (list_empty(&pf->lcache)) { /* Matching lazy line pattern */ ret = find_lazy_match_lines(&pf->lcache, pf->fname, pf->pev->point.lazy_line); if (ret <= 0) return ret; } return die_walk_lines(sp_die, probe_point_lazy_walker, pf); } /* Callback parameter with return value */ struct dwarf_callback_param { void *data; int retval; }; static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) { struct dwarf_callback_param *param = data; struct probe_finder *pf = param->data; struct perf_probe_point *pp = &pf->pev->point; Dwarf_Addr addr; if (pp->lazy_line) param->retval = find_probe_point_lazy(in_die, pf); else { /* Get probe address */ if (dwarf_entrypc(in_die, &addr) != 0) { pr_warning("Failed to get entry address of %s.\n", dwarf_diename(in_die)); param->retval = -ENOENT; return DWARF_CB_ABORT; } pf->addr = addr; pf->addr += pp->offset; pr_debug("found inline addr: 0x%jx\n", (uintmax_t)pf->addr); param->retval = call_probe_finder(in_die, pf); if (param->retval < 0) return DWARF_CB_ABORT; } return DWARF_CB_OK; } /* Search function from function name */ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) { struct dwarf_callback_param *param = data; struct probe_finder *pf = param->data; struct perf_probe_point *pp = &pf->pev->point; /* Check tag and diename */ if (dwarf_tag(sp_die) != DW_TAG_subprogram || !die_compare_name(sp_die, pp->function)) return DWARF_CB_OK; /* Check declared file */ if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die))) return DWARF_CB_OK; pf->fname = dwarf_decl_file(sp_die); if (pp->line) { /* Function relative line */ dwarf_decl_line(sp_die, &pf->lno); pf->lno += pp->line; param->retval = find_probe_point_by_line(pf); } else if (!dwarf_func_inline(sp_die)) { /* Real function */ if (pp->lazy_line) param->retval = find_probe_point_lazy(sp_die, pf); else { if 
(dwarf_entrypc(sp_die, &pf->addr) != 0) { pr_warning("Failed to get entry address of " "%s.\n", dwarf_diename(sp_die)); param->retval = -ENOENT; return DWARF_CB_ABORT; } pf->addr += pp->offset; /* TODO: Check the address in this function */ param->retval = call_probe_finder(sp_die, pf); } } else { struct dwarf_callback_param _param = {.data = (void *)pf, .retval = 0}; /* Inlined function: search instances */ dwarf_func_inline_instances(sp_die, probe_point_inline_cb, &_param); param->retval = _param.retval; } return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */ } static int find_probe_point_by_func(struct probe_finder *pf) { struct dwarf_callback_param _param = {.data = (void *)pf, .retval = 0}; dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, &_param, 0); return _param.retval; } struct pubname_callback_param { char *function; char *file; Dwarf_Die *cu_die; Dwarf_Die *sp_die; int found; }; static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data) { struct pubname_callback_param *param = data; if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) { if (dwarf_tag(param->sp_die) != DW_TAG_subprogram) return DWARF_CB_OK; if (die_compare_name(param->sp_die, param->function)) { if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die)) return DWARF_CB_OK; if (param->file && strtailcmp(param->file, dwarf_decl_file(param->sp_die))) return DWARF_CB_OK; param->found = 1; return DWARF_CB_ABORT; } } return DWARF_CB_OK; } /* Find probe points from debuginfo */ static int find_probes(int fd, struct probe_finder *pf) { struct perf_probe_point *pp = &pf->pev->point; Dwarf_Off off, noff; size_t cuhl; Dwarf_Die *diep; Dwarf *dbg = NULL; Dwfl *dwfl; Dwarf_Addr bias; /* Currently ignored */ int ret = 0; dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); if (!dbg) { pr_warning("No debug information found in the vmlinux - " "please rebuild with CONFIG_DEBUG_INFO=y.\n"); close(fd); /* Without dwfl_end(), fd isn't closed. 
*/ return -EBADF; } #if _ELFUTILS_PREREQ(0, 142) /* Get the call frame information from this dwarf */ pf->cfi = dwarf_getcfi(dbg); #endif off = 0; line_list__init(&pf->lcache); /* Fastpath: lookup by function name from .debug_pubnames section */ if (pp->function) { struct pubname_callback_param pubname_param = { .function = pp->function, .file = pp->file, .cu_die = &pf->cu_die, .sp_die = &pf->sp_die, .found = 0, }; struct dwarf_callback_param probe_param = { .data = pf, }; dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); if (pubname_param.found) { ret = probe_point_search_cb(&pf->sp_die, &probe_param); if (ret) goto found; } } /* Loop on CUs (Compilation Unit) */ while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) { /* Get the DIE(Debugging Information Entry) of this CU */ diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die); if (!diep) continue; /* Check if target file is included. */ if (pp->file) pf->fname = cu_find_realpath(&pf->cu_die, pp->file); else pf->fname = NULL; if (!pp->file || pf->fname) { if (pp->function) ret = find_probe_point_by_func(pf); else if (pp->lazy_line) ret = find_probe_point_lazy(NULL, pf); else { pf->lno = pp->line; ret = find_probe_point_by_line(pf); } if (ret < 0) break; } off = noff; } found: line_list__free(&pf->lcache); if (dwfl) dwfl_end(dwfl); return ret; } /* Add a found probe point into trace event list */ static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) { struct trace_event_finder *tf = container_of(pf, struct trace_event_finder, pf); struct probe_trace_event *tev; int ret, i; /* Check number of tevs */ if (tf->ntevs == tf->max_tevs) { pr_warning("Too many( > %d) probe point found.\n", tf->max_tevs); return -ERANGE; } tev = &tf->tevs[tf->ntevs++]; ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, &tev->point); if (ret < 0) return ret; pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, tev->point.offset); /* Find each argument */ tev->nargs = 
pf->pev->nargs; tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); if (tev->args == NULL) return -ENOMEM; for (i = 0; i < pf->pev->nargs; i++) { pf->pvar = &pf->pev->args[i]; pf->tvar = &tev->args[i]; ret = find_variable(sp_die, pf); if (ret != 0) return ret; } return 0; } /* Find probe_trace_events specified by perf_probe_event from debuginfo */ int find_probe_trace_events(int fd, struct perf_probe_event *pev, struct probe_trace_event **tevs, int max_tevs) { struct trace_event_finder tf = { .pf = {.pev = pev, .callback = add_probe_trace_event}, .max_tevs = max_tevs}; int ret; /* Allocate result tevs array */ *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs); if (*tevs == NULL) return -ENOMEM; tf.tevs = *tevs; tf.ntevs = 0; ret = find_probes(fd, &tf.pf); if (ret < 0) { free(*tevs); *tevs = NULL; return ret; } return (ret < 0) ? ret : tf.ntevs; } #define MAX_VAR_LEN 64 /* Collect available variables in this scope */ static int collect_variables_cb(Dwarf_Die *die_mem, void *data) { struct available_var_finder *af = data; struct variable_list *vl; char buf[MAX_VAR_LEN]; int tag, ret; vl = &af->vls[af->nvls - 1]; tag = dwarf_tag(die_mem); if (tag == DW_TAG_formal_parameter || tag == DW_TAG_variable) { ret = convert_variable_location(die_mem, af->pf.addr, af->pf.fb_ops, NULL); if (ret == 0) { ret = die_get_varname(die_mem, buf, MAX_VAR_LEN); pr_debug2("Add new var: %s\n", buf); if (ret > 0) strlist__add(vl->vars, buf); } } if (af->child && dwarf_haspc(die_mem, af->pf.addr)) return DIE_FIND_CB_CONTINUE; else return DIE_FIND_CB_SIBLING; } /* Add a found vars into available variables list */ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) { struct available_var_finder *af = container_of(pf, struct available_var_finder, pf); struct variable_list *vl; Dwarf_Die die_mem, *scopes = NULL; int ret, nscopes; /* Check number of tevs */ if (af->nvls == af->max_vls) { pr_warning("Too many( > %d) probe point found.\n", af->max_vls); 
return -ERANGE; } vl = &af->vls[af->nvls++]; ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, &vl->point); if (ret < 0) return ret; pr_debug("Probe point found: %s+%lu\n", vl->point.symbol, vl->point.offset); /* Find local variables */ vl->vars = strlist__new(true, NULL); if (vl->vars == NULL) return -ENOMEM; af->child = true; die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem); /* Find external variables */ if (!af->externs) goto out; /* Don't need to search child DIE for externs. */ af->child = false; nscopes = dwarf_getscopes_die(sp_die, &scopes); while (nscopes-- > 1) die_find_child(&scopes[nscopes], collect_variables_cb, (void *)af, &die_mem); if (scopes) free(scopes); out: if (strlist__empty(vl->vars)) { strlist__delete(vl->vars); vl->vars = NULL; } return ret; } /* Find available variables at given probe point */ int find_available_vars_at(int fd, struct perf_probe_event *pev, struct variable_list **vls, int max_vls, bool externs) { struct available_var_finder af = { .pf = {.pev = pev, .callback = add_available_vars}, .max_vls = max_vls, .externs = externs}; int ret; /* Allocate result vls array */ *vls = zalloc(sizeof(struct variable_list) * max_vls); if (*vls == NULL) return -ENOMEM; af.vls = *vls; af.nvls = 0; ret = find_probes(fd, &af.pf); if (ret < 0) { /* Free vlist for error */ while (af.nvls--) { if (af.vls[af.nvls].point.symbol) free(af.vls[af.nvls].point.symbol); if (af.vls[af.nvls].vars) strlist__delete(af.vls[af.nvls].vars); } free(af.vls); *vls = NULL; return ret; } return (ret < 0) ? 
ret : af.nvls; } /* Reverse search */ int find_perf_probe_point(unsigned long addr, struct perf_probe_point *ppt) { Dwarf_Die cudie, spdie, indie; Dwarf *dbg = NULL; Dwfl *dwfl = NULL; Dwarf_Addr _addr, baseaddr, bias = 0; const char *fname = NULL, *func = NULL, *tmp; int baseline = 0, lineno = 0, ret = 0; /* Open the live linux kernel */ dbg = dwfl_init_live_kernel_dwarf(addr, &dwfl, &bias); if (!dbg) { pr_warning("No debug information found in the vmlinux - " "please rebuild with CONFIG_DEBUG_INFO=y.\n"); ret = -EINVAL; goto end; } /* Adjust address with bias */ addr += bias; /* Find cu die */ if (!dwarf_addrdie(dbg, (Dwarf_Addr)addr - bias, &cudie)) { pr_warning("Failed to find debug information for address %lx\n", addr); ret = -EINVAL; goto end; } /* Find a corresponding line (filename and lineno) */ cu_find_lineinfo(&cudie, addr, &fname, &lineno); /* Don't care whether it failed or not */ /* Find a corresponding function (name, baseline and baseaddr) */ if (die_find_real_subprogram(&cudie, (Dwarf_Addr)addr, &spdie)) { /* Get function entry information */ tmp = dwarf_diename(&spdie); if (!tmp || dwarf_entrypc(&spdie, &baseaddr) != 0 || dwarf_decl_line(&spdie, &baseline) != 0) goto post; func = tmp; if (addr == (unsigned long)baseaddr) /* Function entry - Relative line number is 0 */ lineno = baseline; else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr, &indie)) { if (dwarf_entrypc(&indie, &_addr) == 0 && _addr == addr) /* * addr is at an inline function entry. * In this case, lineno should be the call-site * line number. */ lineno = die_get_call_lineno(&indie); else { /* * addr is in an inline function body. * Since lineno points one of the lines * of the inline function, baseline should * be the entry line of the inline function. 
*/ tmp = dwarf_diename(&indie); if (tmp && dwarf_decl_line(&spdie, &baseline) == 0) func = tmp; } } } post: /* Make a relative line number or an offset */ if (lineno) ppt->line = lineno - baseline; else if (func) ppt->offset = addr - (unsigned long)baseaddr; /* Duplicate strings */ if (func) { ppt->function = strdup(func); if (ppt->function == NULL) { ret = -ENOMEM; goto end; } } if (fname) { ppt->file = strdup(fname); if (ppt->file == NULL) { if (ppt->function) { free(ppt->function); ppt->function = NULL; } ret = -ENOMEM; goto end; } } end: if (dwfl) dwfl_end(dwfl); if (ret == 0 && (fname || func)) ret = 1; /* Found a point */ return ret; } /* Add a line and store the src path */ static int line_range_add_line(const char *src, unsigned int lineno, struct line_range *lr) { /* Copy source path */ if (!lr->path) { lr->path = strdup(src); if (lr->path == NULL) return -ENOMEM; } return line_list__add_line(&lr->line_list, lineno); } static int line_range_walk_cb(const char *fname, int lineno, Dwarf_Addr addr __used, void *data) { struct line_finder *lf = data; if ((strtailcmp(fname, lf->fname) != 0) || (lf->lno_s > lineno || lf->lno_e < lineno)) return 0; if (line_range_add_line(fname, lineno, lf->lr) < 0) return -EINVAL; return 0; } /* Find line range from its line number */ static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) { int ret; ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf); /* Update status */ if (ret >= 0) if (!list_empty(&lf->lr->line_list)) ret = lf->found = 1; else ret = 0; /* Lines are not found */ else { free(lf->lr->path); lf->lr->path = NULL; } return ret; } static int line_range_inline_cb(Dwarf_Die *in_die, void *data) { struct dwarf_callback_param *param = data; param->retval = find_line_range_by_line(in_die, param->data); return DWARF_CB_ABORT; /* No need to find other instances */ } /* Search function from function name */ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) { struct 
dwarf_callback_param *param = data; struct line_finder *lf = param->data; struct line_range *lr = lf->lr; /* Check declared file */ if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die))) return DWARF_CB_OK; if (dwarf_tag(sp_die) == DW_TAG_subprogram && die_compare_name(sp_die, lr->function)) { lf->fname = dwarf_decl_file(sp_die); dwarf_decl_line(sp_die, &lr->offset); pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset); lf->lno_s = lr->offset + lr->start; if (lf->lno_s < 0) /* Overflow */ lf->lno_s = INT_MAX; lf->lno_e = lr->offset + lr->end; if (lf->lno_e < 0) /* Overflow */ lf->lno_e = INT_MAX; pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e); lr->start = lf->lno_s; lr->end = lf->lno_e; if (dwarf_func_inline(sp_die)) { struct dwarf_callback_param _param; _param.data = (void *)lf; _param.retval = 0; dwarf_func_inline_instances(sp_die, line_range_inline_cb, &_param); param->retval = _param.retval; } else param->retval = find_line_range_by_line(sp_die, lf); return DWARF_CB_ABORT; } return DWARF_CB_OK; } static int find_line_range_by_func(struct line_finder *lf) { struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0}; dwarf_getfuncs(&lf->cu_die, line_range_search_cb, &param, 0); return param.retval; } int find_line_range(int fd, struct line_range *lr) { struct line_finder lf = {.lr = lr, .found = 0}; int ret = 0; Dwarf_Off off = 0, noff; size_t cuhl; Dwarf_Die *diep; Dwarf *dbg = NULL; Dwfl *dwfl; Dwarf_Addr bias; /* Currently ignored */ const char *comp_dir; dbg = dwfl_init_offline_dwarf(fd, &dwfl, &bias); if (!dbg) { pr_warning("No debug information found in the vmlinux - " "please rebuild with CONFIG_DEBUG_INFO=y.\n"); close(fd); /* Without dwfl_end(), fd isn't closed. 
*/ return -EBADF; } /* Fastpath: lookup by function name from .debug_pubnames section */ if (lr->function) { struct pubname_callback_param pubname_param = { .function = lr->function, .file = lr->file, .cu_die = &lf.cu_die, .sp_die = &lf.sp_die, .found = 0}; struct dwarf_callback_param line_range_param = { .data = (void *)&lf, .retval = 0}; dwarf_getpubnames(dbg, pubname_search_cb, &pubname_param, 0); if (pubname_param.found) { line_range_search_cb(&lf.sp_die, &line_range_param); if (lf.found) goto found; } } /* Loop on CUs (Compilation Unit) */ while (!lf.found && ret >= 0) { if (dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) != 0) break; /* Get the DIE(Debugging Information Entry) of this CU */ diep = dwarf_offdie(dbg, off + cuhl, &lf.cu_die); if (!diep) continue; /* Check if target file is included. */ if (lr->file) lf.fname = cu_find_realpath(&lf.cu_die, lr->file); else lf.fname = 0; if (!lr->file || lf.fname) { if (lr->function) ret = find_line_range_by_func(&lf); else { lf.lno_s = lr->start; lf.lno_e = lr->end; ret = find_line_range_by_line(NULL, &lf); } } off = noff; } found: /* Store comp_dir */ if (lf.found) { comp_dir = cu_get_comp_dir(&lf.cu_die); if (comp_dir) { lr->comp_dir = strdup(comp_dir); if (!lr->comp_dir) ret = -ENOMEM; } } pr_debug("path: %s\n", lr->path); dwfl_end(dwfl); return (ret < 0) ? ret : lf.found; }
gpl-2.0
erikvanzijst/flatlinux
drivers/infiniband/hw/cxgb4/resource.c
2905
12208
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* Crude resource management */ #include <linux/spinlock.h> #include <linux/genalloc.h> #include <linux/ratelimit.h> #include "iw_cxgb4.h" static int c4iw_init_qid_table(struct c4iw_rdev *rdev) { u32 i; if (c4iw_id_table_alloc(&rdev->resource.qid_table, rdev->lldi.vr->qp.start, rdev->lldi.vr->qp.size, rdev->lldi.vr->qp.size, 0)) return -ENOMEM; for (i = rdev->lldi.vr->qp.start; i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) if (!(i & rdev->qpmask)) c4iw_id_free(&rdev->resource.qid_table, i); return 0; } /* nr_* must be power of 2 */ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid) { int err = 0; err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1, C4IW_ID_TABLE_F_RANDOM); if (err) goto tpt_err; err = c4iw_init_qid_table(rdev); if (err) goto qid_err; err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0, nr_pdid, 1, 0); if (err) goto pdid_err; return 0; pdid_err: c4iw_id_table_free(&rdev->resource.qid_table); qid_err: c4iw_id_table_free(&rdev->resource.tpt_table); tpt_err: return -ENOMEM; } /* * returns 0 if no resource available */ u32 c4iw_get_resource(struct c4iw_id_table *id_table) { u32 entry; entry = c4iw_id_alloc(id_table); if (entry == (u32)(-1)) return 0; return entry; } void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry) { PDBG("%s entry 0x%x\n", __func__, entry); c4iw_id_free(id_table, entry); } u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; u32 qid; int i; mutex_lock(&uctx->lock); if (!list_empty(&uctx->cqids)) { entry = list_entry(uctx->cqids.next, struct c4iw_qid_list, entry); list_del(&entry->entry); qid = entry->qid; kfree(entry); } else { qid = c4iw_get_resource(&rdev->resource.qid_table); if (!qid) goto out; mutex_lock(&rdev->stats.lock); rdev->stats.qid.cur += rdev->qpmask + 1; mutex_unlock(&rdev->stats.lock); for (i = qid+1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; 
entry->qid = i; list_add_tail(&entry->entry, &uctx->cqids); } /* * now put the same ids on the qp list since they all * map to the same db/gts page. */ entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = qid; list_add_tail(&entry->entry, &uctx->qpids); for (i = qid+1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->qpids); } } out: mutex_unlock(&uctx->lock); PDBG("%s qid 0x%x\n", __func__, qid); mutex_lock(&rdev->stats.lock); if (rdev->stats.qid.cur > rdev->stats.qid.max) rdev->stats.qid.max = rdev->stats.qid.cur; mutex_unlock(&rdev->stats.lock); return qid; } void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) return; PDBG("%s qid 0x%x\n", __func__, qid); entry->qid = qid; mutex_lock(&uctx->lock); list_add_tail(&entry->entry, &uctx->cqids); mutex_unlock(&uctx->lock); } u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; u32 qid; int i; mutex_lock(&uctx->lock); if (!list_empty(&uctx->qpids)) { entry = list_entry(uctx->qpids.next, struct c4iw_qid_list, entry); list_del(&entry->entry); qid = entry->qid; kfree(entry); } else { qid = c4iw_get_resource(&rdev->resource.qid_table); if (!qid) goto out; mutex_lock(&rdev->stats.lock); rdev->stats.qid.cur += rdev->qpmask + 1; mutex_unlock(&rdev->stats.lock); for (i = qid+1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->qpids); } /* * now put the same ids on the cq list since they all * map to the same db/gts page. 
*/ entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = qid; list_add_tail(&entry->entry, &uctx->cqids); for (i = qid; i & rdev->qpmask; i++) { entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->cqids); } } out: mutex_unlock(&uctx->lock); PDBG("%s qid 0x%x\n", __func__, qid); mutex_lock(&rdev->stats.lock); if (rdev->stats.qid.cur > rdev->stats.qid.max) rdev->stats.qid.max = rdev->stats.qid.cur; mutex_unlock(&rdev->stats.lock); return qid; } void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) return; PDBG("%s qid 0x%x\n", __func__, qid); entry->qid = qid; mutex_lock(&uctx->lock); list_add_tail(&entry->entry, &uctx->qpids); mutex_unlock(&uctx->lock); } void c4iw_destroy_resource(struct c4iw_resource *rscp) { c4iw_id_table_free(&rscp->tpt_table); c4iw_id_table_free(&rscp->qid_table); c4iw_id_table_free(&rscp->pdid_table); } /* * PBL Memory Manager. Uses Linux generic allocator. 
*/ #define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); mutex_lock(&rdev->stats.lock); if (addr) { rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); if (rdev->stats.pbl.cur > rdev->stats.pbl.max) rdev->stats.pbl.max = rdev->stats.pbl.cur; } else rdev->stats.pbl.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size); mutex_lock(&rdev->stats.lock); rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); } int c4iw_pblpool_create(struct c4iw_rdev *rdev) { unsigned pbl_start, pbl_chunk, pbl_top; rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); if (!rdev->pbl_pool) return -ENOMEM; pbl_start = rdev->lldi.vr->pbl.start; pbl_chunk = rdev->lldi.vr->pbl.size; pbl_top = pbl_start + pbl_chunk; while (pbl_start < pbl_top) { pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk); if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) { PDBG("%s failed to add PBL chunk (%x/%x)\n", __func__, pbl_start, pbl_chunk); if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { printk(KERN_WARNING MOD "Failed to add all PBL chunks (%x/%x)\n", pbl_start, pbl_top - pbl_start); return 0; } pbl_chunk >>= 1; } else { PDBG("%s added PBL chunk (%x/%x)\n", __func__, pbl_start, pbl_chunk); pbl_start += pbl_chunk; } } return 0; } void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->pbl_pool); } /* * RQT Memory Manager. Uses Linux generic allocator. 
*/ #define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); if (!addr) printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n", pci_name(rdev->lldi.pdev)); mutex_lock(&rdev->stats.lock); if (addr) { rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); if (rdev->stats.rqt.cur > rdev->stats.rqt.max) rdev->stats.rqt.max = rdev->stats.rqt.cur; } else rdev->stats.rqt.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6); mutex_lock(&rdev->stats.lock); rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); } int c4iw_rqtpool_create(struct c4iw_rdev *rdev) { unsigned rqt_start, rqt_chunk, rqt_top; rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1); if (!rdev->rqt_pool) return -ENOMEM; rqt_start = rdev->lldi.vr->rq.start; rqt_chunk = rdev->lldi.vr->rq.size; rqt_top = rqt_start + rqt_chunk; while (rqt_start < rqt_top) { rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk); if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) { PDBG("%s failed to add RQT chunk (%x/%x)\n", __func__, rqt_start, rqt_chunk); if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) { printk(KERN_WARNING MOD "Failed to add all RQT chunks (%x/%x)\n", rqt_start, rqt_top - rqt_start); return 0; } rqt_chunk >>= 1; } else { PDBG("%s added RQT chunk (%x/%x)\n", __func__, rqt_start, rqt_chunk); rqt_start += rqt_chunk; } } return 0; } void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->rqt_pool); } /* * On-Chip QP Memory. 
*/ #define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); if (addr) { mutex_lock(&rdev->stats.lock); rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max) rdev->stats.ocqp.max = rdev->stats.ocqp.cur; mutex_unlock(&rdev->stats.lock); } return (u32)addr; } void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size); mutex_lock(&rdev->stats.lock); rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size); } int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) { unsigned start, chunk, top; rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1); if (!rdev->ocqp_pool) return -ENOMEM; start = rdev->lldi.vr->ocq.start; chunk = rdev->lldi.vr->ocq.size; top = start + chunk; while (start < top) { chunk = min(top - start + 1, chunk); if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { PDBG("%s failed to add OCQP chunk (%x/%x)\n", __func__, start, chunk); if (chunk <= 1024 << MIN_OCQP_SHIFT) { printk(KERN_WARNING MOD "Failed to add all OCQP chunks (%x/%x)\n", start, top - start); return 0; } chunk >>= 1; } else { PDBG("%s added OCQP chunk (%x/%x)\n", __func__, start, chunk); start += chunk; } } return 0; } void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->ocqp_pool); }
gpl-2.0
jthatch12/SKJT
arch/arm/mach-s3c2410/gpio.c
4441
1871
/* linux/arch/arm/mach-s3c2410/gpio.c * * Copyright (c) 2004-2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2410 GPIO support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/gpio-fns.h> #include <asm/irq.h> #include <mach/regs-gpio.h> int s3c2410_gpio_irqfilter(unsigned int pin, unsigned int on, unsigned int config) { void __iomem *reg = S3C24XX_EINFLT0; unsigned long flags; unsigned long val; if (pin < S3C2410_GPG(8) || pin > S3C2410_GPG(15)) return -EINVAL; config &= 0xff; pin -= S3C2410_GPG(8); reg += pin & ~3; local_irq_save(flags); /* update filter width and clock source */ val = __raw_readl(reg); val &= ~(0xff << ((pin & 3) * 8)); val |= config << ((pin & 3) * 8); __raw_writel(val, reg); /* update filter enable */ val = __raw_readl(S3C24XX_EXTINT2); val &= ~(1 << ((pin * 4) + 3)); val |= on << ((pin * 4) + 3); __raw_writel(val, S3C24XX_EXTINT2); local_irq_restore(flags); return 0; } EXPORT_SYMBOL(s3c2410_gpio_irqfilter);
gpl-2.0
coinlake/xperia-tipo-kernel
drivers/tty/serial/bcm63xx_uart.c
8025
21239
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Derived from many drivers using generic_serial interface. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> * * Serial driver for BCM63xx integrated UART. * * Hardware flow control was _not_ tested since I only have RX/TX on * my board. */ #if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/console.h> #include <linux/clk.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/sysrq.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <bcm63xx_clk.h> #include <bcm63xx_irq.h> #include <bcm63xx_regs.h> #include <bcm63xx_io.h> #define BCM63XX_NR_UARTS 2 static struct uart_port ports[BCM63XX_NR_UARTS]; /* * rx interrupt mask / stat * * mask: * - rx fifo full * - rx fifo above threshold * - rx fifo not empty for too long */ #define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \ UART_IR_MASK(UART_IR_RXTHRESH) | \ UART_IR_MASK(UART_IR_RXTIMEOUT)) #define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \ UART_IR_STAT(UART_IR_RXTHRESH) | \ UART_IR_STAT(UART_IR_RXTIMEOUT)) /* * tx interrupt mask / stat * * mask: * - tx fifo empty * - tx fifo below threshold */ #define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \ UART_IR_MASK(UART_IR_TXTRESH)) #define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \ UART_IR_STAT(UART_IR_TXTRESH)) /* * external input interrupt * * mask: any edge on CTS, DCD */ #define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \ UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD)) /* * handy uart register accessor */ static inline unsigned int bcm_uart_readl(struct uart_port *port, unsigned int offset) { return 
bcm_readl(port->membase + offset); } static inline void bcm_uart_writel(struct uart_port *port, unsigned int value, unsigned int offset) { bcm_writel(value, port->membase + offset); } /* * serial core request to check if uart tx fifo is empty */ static unsigned int bcm_uart_tx_empty(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_IR_REG); return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0; } /* * serial core request to set RTS and DTR pin state and loopback mode */ static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { unsigned int val; val = bcm_uart_readl(port, UART_MCTL_REG); val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK); /* invert of written value is reflected on the pin */ if (!(mctrl & TIOCM_DTR)) val |= UART_MCTL_DTR_MASK; if (!(mctrl & TIOCM_RTS)) val |= UART_MCTL_RTS_MASK; bcm_uart_writel(port, val, UART_MCTL_REG); val = bcm_uart_readl(port, UART_CTL_REG); if (mctrl & TIOCM_LOOP) val |= UART_CTL_LOOPBACK_MASK; else val &= ~UART_CTL_LOOPBACK_MASK; bcm_uart_writel(port, val, UART_CTL_REG); } /* * serial core request to return RI, CTS, DCD and DSR pin state */ static unsigned int bcm_uart_get_mctrl(struct uart_port *port) { unsigned int val, mctrl; mctrl = 0; val = bcm_uart_readl(port, UART_EXTINP_REG); if (val & UART_EXTINP_RI_MASK) mctrl |= TIOCM_RI; if (val & UART_EXTINP_CTS_MASK) mctrl |= TIOCM_CTS; if (val & UART_EXTINP_DCD_MASK) mctrl |= TIOCM_CD; if (val & UART_EXTINP_DSR_MASK) mctrl |= TIOCM_DSR; return mctrl; } /* * serial core request to disable tx ASAP (used for flow control) */ static void bcm_uart_stop_tx(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_CTL_REG); val &= ~(UART_CTL_TXEN_MASK); bcm_uart_writel(port, val, UART_CTL_REG); val = bcm_uart_readl(port, UART_IR_REG); val &= ~UART_TX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); } /* * serial core request to (re)enable tx */ static void bcm_uart_start_tx(struct uart_port *port) { unsigned int val; val = 
bcm_uart_readl(port, UART_IR_REG); val |= UART_TX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); val = bcm_uart_readl(port, UART_CTL_REG); val |= UART_CTL_TXEN_MASK; bcm_uart_writel(port, val, UART_CTL_REG); } /* * serial core request to stop rx, called before port shutdown */ static void bcm_uart_stop_rx(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_IR_REG); val &= ~UART_RX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); } /* * serial core request to enable modem status interrupt reporting */ static void bcm_uart_enable_ms(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_IR_REG); val |= UART_IR_MASK(UART_IR_EXTIP); bcm_uart_writel(port, val, UART_IR_REG); } /* * serial core request to start/stop emitting break char */ static void bcm_uart_break_ctl(struct uart_port *port, int ctl) { unsigned long flags; unsigned int val; spin_lock_irqsave(&port->lock, flags); val = bcm_uart_readl(port, UART_CTL_REG); if (ctl) val |= UART_CTL_XMITBRK_MASK; else val &= ~UART_CTL_XMITBRK_MASK; bcm_uart_writel(port, val, UART_CTL_REG); spin_unlock_irqrestore(&port->lock, flags); } /* * return port type in string format */ static const char *bcm_uart_type(struct uart_port *port) { return (port->type == PORT_BCM63XX) ? 
"bcm63xx_uart" : NULL; } /* * read all chars in rx fifo and send them to core */ static void bcm_uart_do_rx(struct uart_port *port) { struct tty_struct *tty; unsigned int max_count; /* limit number of char read in interrupt, should not be * higher than fifo size anyway since we're much faster than * serial port */ max_count = 32; tty = port->state->port.tty; do { unsigned int iestat, c, cstat; char flag; /* get overrun/fifo empty information from ier * register */ iestat = bcm_uart_readl(port, UART_IR_REG); if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) { unsigned int val; /* fifo reset is required to clear * interrupt */ val = bcm_uart_readl(port, UART_CTL_REG); val |= UART_CTL_RSTRXFIFO_MASK; bcm_uart_writel(port, val, UART_CTL_REG); port->icount.overrun++; tty_insert_flip_char(tty, 0, TTY_OVERRUN); } if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY))) break; cstat = c = bcm_uart_readl(port, UART_FIFO_REG); port->icount.rx++; flag = TTY_NORMAL; c &= 0xff; if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) { /* do stats first */ if (cstat & UART_FIFO_BRKDET_MASK) { port->icount.brk++; if (uart_handle_break(port)) continue; } if (cstat & UART_FIFO_PARERR_MASK) port->icount.parity++; if (cstat & UART_FIFO_FRAMEERR_MASK) port->icount.frame++; /* update flag wrt read_status_mask */ cstat &= port->read_status_mask; if (cstat & UART_FIFO_BRKDET_MASK) flag = TTY_BREAK; if (cstat & UART_FIFO_FRAMEERR_MASK) flag = TTY_FRAME; if (cstat & UART_FIFO_PARERR_MASK) flag = TTY_PARITY; } if (uart_handle_sysrq_char(port, c)) continue; if ((cstat & port->ignore_status_mask) == 0) tty_insert_flip_char(tty, c, flag); } while (--max_count); tty_flip_buffer_push(tty); } /* * fill tx fifo with chars to send, stop when fifo is about to be full * or when all chars have been sent. 
*/ static void bcm_uart_do_tx(struct uart_port *port) { struct circ_buf *xmit; unsigned int val, max_count; if (port->x_char) { bcm_uart_writel(port, port->x_char, UART_FIFO_REG); port->icount.tx++; port->x_char = 0; return; } if (uart_tx_stopped(port)) { bcm_uart_stop_tx(port); return; } xmit = &port->state->xmit; if (uart_circ_empty(xmit)) goto txq_empty; val = bcm_uart_readl(port, UART_MCTL_REG); val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT; max_count = port->fifosize - val; while (max_count--) { unsigned int c; c = xmit->buf[xmit->tail]; bcm_uart_writel(port, c, UART_FIFO_REG); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; if (uart_circ_empty(xmit)) break; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); if (uart_circ_empty(xmit)) goto txq_empty; return; txq_empty: /* nothing to send, disable transmit interrupt */ val = bcm_uart_readl(port, UART_IR_REG); val &= ~UART_TX_INT_MASK; bcm_uart_writel(port, val, UART_IR_REG); return; } /* * process uart interrupt */ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id) { struct uart_port *port; unsigned int irqstat; port = dev_id; spin_lock(&port->lock); irqstat = bcm_uart_readl(port, UART_IR_REG); if (irqstat & UART_RX_INT_STAT) bcm_uart_do_rx(port); if (irqstat & UART_TX_INT_STAT) bcm_uart_do_tx(port); if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) { unsigned int estat; estat = bcm_uart_readl(port, UART_EXTINP_REG); if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS)) uart_handle_cts_change(port, estat & UART_EXTINP_CTS_MASK); if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD)) uart_handle_dcd_change(port, estat & UART_EXTINP_DCD_MASK); } spin_unlock(&port->lock); return IRQ_HANDLED; } /* * enable rx & tx operation on uart */ static void bcm_uart_enable(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_CTL_REG); val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK); bcm_uart_writel(port, val, 
UART_CTL_REG); } /* * disable rx & tx operation on uart */ static void bcm_uart_disable(struct uart_port *port) { unsigned int val; val = bcm_uart_readl(port, UART_CTL_REG); val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK); bcm_uart_writel(port, val, UART_CTL_REG); } /* * clear all unread data in rx fifo and unsent data in tx fifo */ static void bcm_uart_flush(struct uart_port *port) { unsigned int val; /* empty rx and tx fifo */ val = bcm_uart_readl(port, UART_CTL_REG); val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK; bcm_uart_writel(port, val, UART_CTL_REG); /* read any pending char to make sure all irq status are * cleared */ (void)bcm_uart_readl(port, UART_FIFO_REG); } /* * serial core request to initialize uart and start rx operation */ static int bcm_uart_startup(struct uart_port *port) { unsigned int val; int ret; /* mask all irq and flush port */ bcm_uart_disable(port); bcm_uart_writel(port, 0, UART_IR_REG); bcm_uart_flush(port); /* clear any pending external input interrupt */ (void)bcm_uart_readl(port, UART_EXTINP_REG); /* set rx/tx fifo thresh to fifo half size */ val = bcm_uart_readl(port, UART_MCTL_REG); val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK); val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT; val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT; bcm_uart_writel(port, val, UART_MCTL_REG); /* set rx fifo timeout to 1 char time */ val = bcm_uart_readl(port, UART_CTL_REG); val &= ~UART_CTL_RXTMOUTCNT_MASK; val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT; bcm_uart_writel(port, val, UART_CTL_REG); /* report any edge on dcd and cts */ val = UART_EXTINP_INT_MASK; val |= UART_EXTINP_DCD_NOSENSE_MASK; val |= UART_EXTINP_CTS_NOSENSE_MASK; bcm_uart_writel(port, val, UART_EXTINP_REG); /* register irq and enable rx interrupts */ ret = request_irq(port->irq, bcm_uart_interrupt, 0, bcm_uart_type(port), port); if (ret) return ret; bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); 
bcm_uart_enable(port); return 0; } /* * serial core request to flush & disable uart */ static void bcm_uart_shutdown(struct uart_port *port) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); bcm_uart_writel(port, 0, UART_IR_REG); spin_unlock_irqrestore(&port->lock, flags); bcm_uart_disable(port); bcm_uart_flush(port); free_irq(port->irq, port); } /* * serial core request to change current uart setting */ static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new, struct ktermios *old) { unsigned int ctl, baud, quot, ier; unsigned long flags; spin_lock_irqsave(&port->lock, flags); /* disable uart while changing speed */ bcm_uart_disable(port); bcm_uart_flush(port); /* update Control register */ ctl = bcm_uart_readl(port, UART_CTL_REG); ctl &= ~UART_CTL_BITSPERSYM_MASK; switch (new->c_cflag & CSIZE) { case CS5: ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT); break; case CS6: ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT); break; case CS7: ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT); break; default: ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT); break; } ctl &= ~UART_CTL_STOPBITS_MASK; if (new->c_cflag & CSTOPB) ctl |= UART_CTL_STOPBITS_2; else ctl |= UART_CTL_STOPBITS_1; ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK); if (new->c_cflag & PARENB) ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK); ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK); if (new->c_cflag & PARODD) ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK); bcm_uart_writel(port, ctl, UART_CTL_REG); /* update Baudword register */ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); quot = uart_get_divisor(port, baud) - 1; bcm_uart_writel(port, quot, UART_BAUD_REG); /* update Interrupt register */ ier = bcm_uart_readl(port, UART_IR_REG); ier &= ~UART_IR_MASK(UART_IR_EXTIP); if (UART_ENABLE_MS(port, new->c_cflag)) ier |= UART_IR_MASK(UART_IR_EXTIP); bcm_uart_writel(port, ier, UART_IR_REG); /* update read/ignore mask */ port->read_status_mask = 
UART_FIFO_VALID_MASK; if (new->c_iflag & INPCK) { port->read_status_mask |= UART_FIFO_FRAMEERR_MASK; port->read_status_mask |= UART_FIFO_PARERR_MASK; } if (new->c_iflag & (BRKINT)) port->read_status_mask |= UART_FIFO_BRKDET_MASK; port->ignore_status_mask = 0; if (new->c_iflag & IGNPAR) port->ignore_status_mask |= UART_FIFO_PARERR_MASK; if (new->c_iflag & IGNBRK) port->ignore_status_mask |= UART_FIFO_BRKDET_MASK; if (!(new->c_cflag & CREAD)) port->ignore_status_mask |= UART_FIFO_VALID_MASK; uart_update_timeout(port, new->c_cflag, baud); bcm_uart_enable(port); spin_unlock_irqrestore(&port->lock, flags); } /* * serial core request to claim uart iomem */ static int bcm_uart_request_port(struct uart_port *port) { unsigned int size; size = RSET_UART_SIZE; if (!request_mem_region(port->mapbase, size, "bcm63xx")) { dev_err(port->dev, "Memory region busy\n"); return -EBUSY; } port->membase = ioremap(port->mapbase, size); if (!port->membase) { dev_err(port->dev, "Unable to map registers\n"); release_mem_region(port->mapbase, size); return -EBUSY; } return 0; } /* * serial core request to release uart iomem */ static void bcm_uart_release_port(struct uart_port *port) { release_mem_region(port->mapbase, RSET_UART_SIZE); iounmap(port->membase); } /* * serial core request to do any port required autoconfiguration */ static void bcm_uart_config_port(struct uart_port *port, int flags) { if (flags & UART_CONFIG_TYPE) { if (bcm_uart_request_port(port)) return; port->type = PORT_BCM63XX; } } /* * serial core request to check that port information in serinfo are * suitable */ static int bcm_uart_verify_port(struct uart_port *port, struct serial_struct *serinfo) { if (port->type != PORT_BCM63XX) return -EINVAL; if (port->irq != serinfo->irq) return -EINVAL; if (port->iotype != serinfo->io_type) return -EINVAL; if (port->mapbase != (unsigned long)serinfo->iomem_base) return -EINVAL; return 0; } /* serial core callbacks */ static struct uart_ops bcm_uart_ops = { .tx_empty = 
bcm_uart_tx_empty, .get_mctrl = bcm_uart_get_mctrl, .set_mctrl = bcm_uart_set_mctrl, .start_tx = bcm_uart_start_tx, .stop_tx = bcm_uart_stop_tx, .stop_rx = bcm_uart_stop_rx, .enable_ms = bcm_uart_enable_ms, .break_ctl = bcm_uart_break_ctl, .startup = bcm_uart_startup, .shutdown = bcm_uart_shutdown, .set_termios = bcm_uart_set_termios, .type = bcm_uart_type, .release_port = bcm_uart_release_port, .request_port = bcm_uart_request_port, .config_port = bcm_uart_config_port, .verify_port = bcm_uart_verify_port, }; #ifdef CONFIG_SERIAL_BCM63XX_CONSOLE static inline void wait_for_xmitr(struct uart_port *port) { unsigned int tmout; /* Wait up to 10ms for the character(s) to be sent. */ tmout = 10000; while (--tmout) { unsigned int val; val = bcm_uart_readl(port, UART_IR_REG); if (val & UART_IR_STAT(UART_IR_TXEMPTY)) break; udelay(1); } /* Wait up to 1s for flow control if necessary */ if (port->flags & UPF_CONS_FLOW) { tmout = 1000000; while (--tmout) { unsigned int val; val = bcm_uart_readl(port, UART_EXTINP_REG); if (val & UART_EXTINP_CTS_MASK) break; udelay(1); } } } /* * output given char */ static void bcm_console_putchar(struct uart_port *port, int ch) { wait_for_xmitr(port); bcm_uart_writel(port, ch, UART_FIFO_REG); } /* * console core request to output given string */ static void bcm_console_write(struct console *co, const char *s, unsigned int count) { struct uart_port *port; unsigned long flags; int locked; port = &ports[co->index]; local_irq_save(flags); if (port->sysrq) { /* bcm_uart_interrupt() already took the lock */ locked = 0; } else if (oops_in_progress) { locked = spin_trylock(&port->lock); } else { spin_lock(&port->lock); locked = 1; } /* call helper to deal with \r\n */ uart_console_write(port, s, count, bcm_console_putchar); /* and wait for char to be transmitted */ wait_for_xmitr(port); if (locked) spin_unlock(&port->lock); local_irq_restore(flags); } /* * console core request to setup given console, find matching uart * port and setup it. 
*/ static int bcm_console_setup(struct console *co, char *options) { struct uart_port *port; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; if (co->index < 0 || co->index >= BCM63XX_NR_UARTS) return -EINVAL; port = &ports[co->index]; if (!port->membase) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(port, co, baud, parity, bits, flow); } static struct uart_driver bcm_uart_driver; static struct console bcm63xx_console = { .name = "ttyS", .write = bcm_console_write, .device = uart_console_device, .setup = bcm_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &bcm_uart_driver, }; static int __init bcm63xx_console_init(void) { register_console(&bcm63xx_console); return 0; } console_initcall(bcm63xx_console_init); #define BCM63XX_CONSOLE (&bcm63xx_console) #else #define BCM63XX_CONSOLE NULL #endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */ static struct uart_driver bcm_uart_driver = { .owner = THIS_MODULE, .driver_name = "bcm63xx_uart", .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, .nr = BCM63XX_NR_UARTS, .cons = BCM63XX_CONSOLE, }; /* * platform driver probe/remove callback */ static int __devinit bcm_uart_probe(struct platform_device *pdev) { struct resource *res_mem, *res_irq; struct uart_port *port; struct clk *clk; int ret; if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS) return -EINVAL; if (ports[pdev->id].membase) return -EBUSY; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res_mem) return -ENODEV; res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_irq) return -ENODEV; clk = clk_get(&pdev->dev, "periph"); if (IS_ERR(clk)) return -ENODEV; port = &ports[pdev->id]; memset(port, 0, sizeof(*port)); port->iotype = UPIO_MEM; port->mapbase = res_mem->start; port->irq = res_irq->start; port->ops = &bcm_uart_ops; port->flags = UPF_BOOT_AUTOCONF; port->dev = &pdev->dev; port->fifosize = 16; port->uartclk = clk_get_rate(clk) / 2; port->line = 
pdev->id; clk_put(clk); ret = uart_add_one_port(&bcm_uart_driver, port); if (ret) { ports[pdev->id].membase = 0; return ret; } platform_set_drvdata(pdev, port); return 0; } static int __devexit bcm_uart_remove(struct platform_device *pdev) { struct uart_port *port; port = platform_get_drvdata(pdev); uart_remove_one_port(&bcm_uart_driver, port); platform_set_drvdata(pdev, NULL); /* mark port as free */ ports[pdev->id].membase = 0; return 0; } /* * platform driver stuff */ static struct platform_driver bcm_uart_platform_driver = { .probe = bcm_uart_probe, .remove = __devexit_p(bcm_uart_remove), .driver = { .owner = THIS_MODULE, .name = "bcm63xx_uart", }, }; static int __init bcm_uart_init(void) { int ret; ret = uart_register_driver(&bcm_uart_driver); if (ret) return ret; ret = platform_driver_register(&bcm_uart_platform_driver); if (ret) uart_unregister_driver(&bcm_uart_driver); return ret; } static void __exit bcm_uart_exit(void) { platform_driver_unregister(&bcm_uart_platform_driver); uart_unregister_driver(&bcm_uart_driver); } module_init(bcm_uart_init); module_exit(bcm_uart_exit); MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver"); MODULE_LICENSE("GPL");
gpl-2.0
TEAM-Gummy/android_kernel_sony_msm8x27
drivers/ata/pata_piccolo.c
9049
3908
/* * pata_piccolo.c - Toshiba Piccolo PATA/SATA controller driver. * * This is basically an update to ata_generic.c to add Toshiba Piccolo support * then split out to keep ata_generic "clean". * * Copyright 2005 Red Hat Inc, all rights reserved. * * Elements from ide/pci/generic.c * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> * Portions (C) Copyright 2002 Red Hat Inc <alan@redhat.com> * * May be copied or modified under the terms of the GNU General Public License * * The timing data tables/programming info are courtesy of the NetBSD driver */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_piccolo" #define DRV_VERSION "0.0.1" static void tosh_set_piomode(struct ata_port *ap, struct ata_device *adev) { static const u16 pio[6] = { /* For reg 0x50 low word & E088 */ 0x0566, 0x0433, 0x0311, 0x0201, 0x0200, 0x0100 }; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u16 conf; pci_read_config_word(pdev, 0x50, &conf); conf &= 0xE088; conf |= pio[adev->pio_mode - XFER_PIO_0]; pci_write_config_word(pdev, 0x50, conf); } static void tosh_set_dmamode(struct ata_port *ap, struct ata_device *adev) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u32 conf; pci_read_config_dword(pdev, 0x5C, &conf); conf &= 0x78FFE088; /* Keep the other bits */ if (adev->dma_mode >= XFER_UDMA_0) { int udma = adev->dma_mode - XFER_UDMA_0; conf |= 0x80000000; conf |= (udma + 2) << 28; conf |= (2 - udma) * 0x111; /* spread into three nibbles */ } else { static const u32 mwdma[4] = { 0x0655, 0x0200, 0x0200, 0x0100 }; conf |= mwdma[adev->dma_mode - XFER_MW_DMA_0]; } pci_write_config_dword(pdev, 0x5C, conf); } static struct scsi_host_template tosh_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations tosh_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_unknown, 
.set_piomode = tosh_set_piomode, .set_dmamode = tosh_set_dmamode }; /** * ata_tosh_init - attach generic IDE * @dev: PCI device found * @id: match entry * * Called each time a matching IDE interface is found. We check if the * interface is one we wish to claim and if so we perform any chip * specific hacks then let the ATA layer do the heavy lifting. */ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id) { static const struct ata_port_info info = { .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = ATA_PIO5, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, .port_ops = &tosh_port_ops }; const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; /* Just one port for the moment */ return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0); } static struct pci_device_id ata_tosh[] = { { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, { 0, }, }; static struct pci_driver ata_tosh_pci_driver = { .name = DRV_NAME, .id_table = ata_tosh, .probe = ata_tosh_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; static int __init ata_tosh_init(void) { return pci_register_driver(&ata_tosh_pci_driver); } static void __exit ata_tosh_exit(void) { pci_unregister_driver(&ata_tosh_pci_driver); } MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("Low level driver for Toshiba Piccolo ATA"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, ata_tosh); MODULE_VERSION(DRV_VERSION); module_init(ata_tosh_init); module_exit(ata_tosh_exit);
gpl-2.0
sktjdgns1189/android_kernel_samsung_lentislte
drivers/video/msm/mdss/mdss_dsi_cmd.c
90
15446
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/iopoll.h> #include <linux/kthread.h> #include <linux/msm_iommu_domains.h> #include "mdss_dsi_cmd.h" #include "mdss_dsi.h" /* * mipi dsi buf mechanism */ char *mdss_dsi_buf_reserve(struct dsi_buf *dp, int len) { dp->data += len; return dp->data; } char *mdss_dsi_buf_unreserve(struct dsi_buf *dp, int len) { dp->data -= len; return dp->data; } char *mdss_dsi_buf_push(struct dsi_buf *dp, int len) { dp->data -= len; dp->len += len; return dp->data; } char *mdss_dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen) { dp->hdr = (u32 *)dp->data; return mdss_dsi_buf_reserve(dp, hlen); } char *mdss_dsi_buf_init(struct dsi_buf *dp) { int off; dp->data = dp->start; off = (int) (unsigned long) dp->data; /* 8 byte align */ off &= 0x07; if (off) off = 8 - off; dp->data += off; dp->len = 0; return dp->data; } int mdss_dsi_buf_alloc(struct dsi_buf *dp, int size) { int off; dp->start = dma_alloc_writecombine(NULL, size, &dp->dmap, GFP_KERNEL); if (dp->start == NULL) { pr_err("%s:%u\n", __func__, __LINE__); return -ENOMEM; } /* PAGE_SIZE align */ if ((u32)dp->start & (SZ_4K - 1)) { kfree(dp->start); dp->start = kmalloc(size * 2, GFP_KERNEL); if (dp->start == NULL) { pr_err("%s:%u\n", __func__, __LINE__); return -ENOMEM; } off = (int)dp->start; off &= (SZ_4K - 1); if (off) off 
= SZ_4K - off; dp->start += off; } dp->end = dp->start + size; dp->size = size; if ((int) (unsigned long) dp->start & 0x07) pr_err("%s: buf NOT 8 bytes aligned\n", __func__); dp->data = dp->start; dp->len = 0; return size; } /* * mipi dsi generic long write */ static int mdss_dsi_generic_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; char *bp; u32 *hp; int i, len = 0; dchdr = &cm->dchdr; bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); /* fill up payload */ if (cm->payload) { len = dchdr->dlen; len += 3; len &= ~0x03; /* multipled by 4 */ for (i = 0; i < dchdr->dlen; i++) *bp++ = cm->payload[i]; /* append 0xff to the end */ for (; i < len; i++) *bp++ = 0xff; dp->len += len; } /* fill up header */ hp = dp->hdr; *hp = 0; *hp = DSI_HDR_WC(dchdr->dlen); *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_LONG_PKT; *hp |= DSI_HDR_DTYPE(DTYPE_GEN_LWRITE); if (dchdr->last) *hp |= DSI_HDR_LAST; mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); len += DSI_HOST_HDR_SIZE; return len; } /* * mipi dsi generic short write with 0, 1 2 parameters */ static int mdss_dsi_generic_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; int len; dchdr = &cm->dchdr; if (dchdr->dlen && cm->payload == 0) { pr_err("%s: NO payload error\n", __func__); return 0; } mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); if (dchdr->last) *hp |= DSI_HDR_LAST; len = (dchdr->dlen > 2) ? 
2 : dchdr->dlen; if (len == 1) { *hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE1); *hp |= DSI_HDR_DATA1(cm->payload[0]); *hp |= DSI_HDR_DATA2(0); } else if (len == 2) { *hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE2); *hp |= DSI_HDR_DATA1(cm->payload[0]); *hp |= DSI_HDR_DATA2(cm->payload[1]); } else { *hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE); *hp |= DSI_HDR_DATA1(0); *hp |= DSI_HDR_DATA2(0); } mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } /* * mipi dsi gerneric read with 0, 1 2 parameters */ static int mdss_dsi_generic_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; int len; dchdr = &cm->dchdr; if (dchdr->dlen && cm->payload == 0) { pr_err("%s: NO payload error\n", __func__); return 0; } mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_BTA; if (dchdr->last) *hp |= DSI_HDR_LAST; len = (dchdr->dlen > 2) ? 2 : dchdr->dlen; if (len == 1) { *hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ1); *hp |= DSI_HDR_DATA1(cm->payload[0]); *hp |= DSI_HDR_DATA2(0); } else if (len == 2) { *hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ2); *hp |= DSI_HDR_DATA1(cm->payload[0]); *hp |= DSI_HDR_DATA2(cm->payload[1]); } else { *hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ); *hp |= DSI_HDR_DATA1(0); *hp |= DSI_HDR_DATA2(0); } mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } /* * mipi dsi dcs long write */ static int mdss_dsi_dcs_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; char *bp; u32 *hp; int i, len = 0; dchdr = &cm->dchdr; bp = mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); /* * fill up payload * dcs command byte (first byte) followed by payload */ if (cm->payload) { len = dchdr->dlen; len += 3; len &= ~0x03; /* multipled by 4 */ for (i = 0; i < dchdr->dlen; i++) *bp++ = cm->payload[i]; /* append 0xff to the end */ for (; i < len; i++) *bp++ = 0xff; dp->len += len; } /* fill up header */ hp = dp->hdr; *hp = 0; *hp = 
DSI_HDR_WC(dchdr->dlen); *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_LONG_PKT; *hp |= DSI_HDR_DTYPE(DTYPE_DCS_LWRITE); if (dchdr->last) *hp |= DSI_HDR_LAST; mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); len += DSI_HOST_HDR_SIZE; return len; } /* * mipi dsi dcs short write with 0 parameters */ static int mdss_dsi_dcs_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; int len; dchdr = &cm->dchdr; if (cm->payload == 0) { pr_err("%s: NO payload error\n", __func__); return -EINVAL; } mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); if (dchdr->ack) /* ask ACK trigger msg from peripeheral */ *hp |= DSI_HDR_BTA; if (dchdr->last) *hp |= DSI_HDR_LAST; len = (dchdr->dlen > 1) ? 1 : dchdr->dlen; *hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE); *hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */ *hp |= DSI_HDR_DATA2(0); mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } /* * mipi dsi dcs short write with 1 parameters */ static int mdss_dsi_dcs_swrite1(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; dchdr = &cm->dchdr; if (dchdr->dlen < 2 || cm->payload == 0) { pr_err("%s: NO payload error\n", __func__); return -EINVAL; } mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); if (dchdr->ack) /* ask ACK trigger msg from peripeheral */ *hp |= DSI_HDR_BTA; if (dchdr->last) *hp |= DSI_HDR_LAST; *hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE1); *hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs comamnd byte */ *hp |= DSI_HDR_DATA2(cm->payload[1]); /* parameter */ mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } /* * mipi dsi dcs read with 0 parameters */ static int mdss_dsi_dcs_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; dchdr = &cm->dchdr; if (cm->payload == 0) { pr_err("%s: NO payload error\n", __func__); return 
-EINVAL; } mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_BTA; *hp |= DSI_HDR_DTYPE(DTYPE_DCS_READ); if (dchdr->last) *hp |= DSI_HDR_LAST; *hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */ *hp |= DSI_HDR_DATA2(0); mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } static int mdss_dsi_cm_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; dchdr = &cm->dchdr; mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_DTYPE(DTYPE_CM_ON); if (dchdr->last) *hp |= DSI_HDR_LAST; mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } static int mdss_dsi_cm_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; dchdr = &cm->dchdr; mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_DTYPE(DTYPE_CM_OFF); if (dchdr->last) *hp |= DSI_HDR_LAST; mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } static int mdss_dsi_peripheral_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; dchdr = &cm->dchdr; mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_ON); if (dchdr->last) *hp |= DSI_HDR_LAST; mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } static int mdss_dsi_peripheral_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; dchdr = &cm->dchdr; mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_OFF); if (dchdr->last) *hp |= DSI_HDR_LAST; mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } static int 
mdss_dsi_set_max_pktsize(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; dchdr = &cm->dchdr; if (cm->payload == 0) { pr_err("%s: NO payload error\n", __func__); return 0; } mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_DTYPE(DTYPE_MAX_PKTSIZE); if (dchdr->last) *hp |= DSI_HDR_LAST; *hp |= DSI_HDR_DATA1(cm->payload[0]); *hp |= DSI_HDR_DATA2(cm->payload[1]); mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } static int mdss_dsi_null_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; dchdr = &cm->dchdr; mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp = DSI_HDR_WC(dchdr->dlen); *hp |= DSI_HDR_LONG_PKT; *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_DTYPE(DTYPE_NULL_PKT); if (dchdr->last) *hp |= DSI_HDR_LAST; mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } static int mdss_dsi_blank_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; u32 *hp; dchdr = &cm->dchdr; mdss_dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE); hp = dp->hdr; *hp = 0; *hp = DSI_HDR_WC(dchdr->dlen); *hp |= DSI_HDR_LONG_PKT; *hp |= DSI_HDR_VC(dchdr->vc); *hp |= DSI_HDR_DTYPE(DTYPE_BLANK_PKT); if (dchdr->last) *hp |= DSI_HDR_LAST; mdss_dsi_buf_push(dp, DSI_HOST_HDR_SIZE); return DSI_HOST_HDR_SIZE; /* 4 bytes */ } /* * prepare cmd buffer to be txed */ int mdss_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm) { struct dsi_ctrl_hdr *dchdr; int len = 0; dchdr = &cm->dchdr; switch (dchdr->dtype) { case DTYPE_GEN_WRITE: case DTYPE_GEN_WRITE1: case DTYPE_GEN_WRITE2: len = mdss_dsi_generic_swrite(dp, cm); break; case DTYPE_GEN_LWRITE: len = mdss_dsi_generic_lwrite(dp, cm); break; case DTYPE_GEN_READ: case DTYPE_GEN_READ1: case DTYPE_GEN_READ2: len = mdss_dsi_generic_read(dp, cm); break; case DTYPE_DCS_LWRITE: len = mdss_dsi_dcs_lwrite(dp, cm); 
break; case DTYPE_DCS_WRITE: len = mdss_dsi_dcs_swrite(dp, cm); break; case DTYPE_DCS_WRITE1: len = mdss_dsi_dcs_swrite1(dp, cm); break; case DTYPE_DCS_READ: len = mdss_dsi_dcs_read(dp, cm); break; case DTYPE_MAX_PKTSIZE: len = mdss_dsi_set_max_pktsize(dp, cm); break; case DTYPE_NULL_PKT: len = mdss_dsi_null_pkt(dp, cm); break; case DTYPE_BLANK_PKT: len = mdss_dsi_blank_pkt(dp, cm); break; case DTYPE_CM_ON: len = mdss_dsi_cm_on(dp, cm); break; case DTYPE_CM_OFF: len = mdss_dsi_cm_off(dp, cm); break; case DTYPE_PERIPHERAL_ON: len = mdss_dsi_peripheral_on(dp, cm); break; case DTYPE_PERIPHERAL_OFF: len = mdss_dsi_peripheral_off(dp, cm); break; default: pr_debug("%s: dtype=%x NOT supported\n", __func__, dchdr->dtype); break; } return len; } /* * mdss_dsi_short_read1_resp: 1 parameter */ int mdss_dsi_short_read1_resp(struct dsi_buf *rp) { /* strip out dcs type */ rp->data++; rp->len = 1; return rp->len; } /* * mdss_dsi_short_read2_resp: 2 parameter */ int mdss_dsi_short_read2_resp(struct dsi_buf *rp) { /* strip out dcs type */ rp->data++; rp->len = 2; return rp->len; } int mdss_dsi_long_read_resp(struct dsi_buf *rp) { /* strip out dcs header */ rp->data += 4; rp->len -= 4; return rp->len; } static char set_tear_on[2] = {0x35, 0x00}; static struct dsi_cmd_desc dsi_tear_on_cmd = { {DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_tear_on)}, set_tear_on}; static char set_tear_off[2] = {0x34, 0x00}; static struct dsi_cmd_desc dsi_tear_off_cmd = { {DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(set_tear_off)}, set_tear_off}; void mdss_dsi_set_tear_on(struct mdss_dsi_ctrl_pdata *ctrl) { struct dcs_cmd_req cmdreq; cmdreq.cmds = &dsi_tear_on_cmd; cmdreq.cmds_cnt = 1; cmdreq.flags = CMD_REQ_COMMIT; cmdreq.rlen = 0; cmdreq.cb = NULL; mdss_dsi_cmdlist_put(ctrl, &cmdreq); } void mdss_dsi_set_tear_off(struct mdss_dsi_ctrl_pdata *ctrl) { struct dcs_cmd_req cmdreq; cmdreq.cmds = &dsi_tear_off_cmd; cmdreq.cmds_cnt = 1; cmdreq.flags = CMD_REQ_COMMIT; cmdreq.rlen = 0; cmdreq.cb = NULL; 
mdss_dsi_cmdlist_put(ctrl, &cmdreq); } /* * mdss_dsi_cmd_get: ctrl->cmd_mutex acquired by caller */ struct dcs_cmd_req *mdss_dsi_cmdlist_get(struct mdss_dsi_ctrl_pdata *ctrl) { struct dcs_cmd_list *clist; struct dcs_cmd_req *req = NULL; clist = &ctrl->cmdlist; if (clist->get != clist->put) { req = &clist->list[clist->get]; clist->get++; clist->get %= CMD_REQ_MAX; clist->tot--; pr_debug("%s: tot=%d put=%d get=%d\n", __func__, clist->tot, clist->put, clist->get); } return req; } int mdss_dsi_cmdlist_put(struct mdss_dsi_ctrl_pdata *ctrl, struct dcs_cmd_req *cmdreq) { struct dcs_cmd_req *req; struct dcs_cmd_list *clist; int ret = 0; mutex_lock(&ctrl->cmd_mutex); clist = &ctrl->cmdlist; req = &clist->list[clist->put]; *req = *cmdreq; clist->put++; clist->put %= CMD_REQ_MAX; clist->tot++; if (clist->put == clist->get) { /* drop the oldest one */ pr_debug("%s: DROP, tot=%d put=%d get=%d\n", __func__, clist->tot, clist->put, clist->get); clist->get++; clist->get %= CMD_REQ_MAX; clist->tot--; } pr_debug("%s: tot=%d put=%d get=%d\n", __func__, clist->tot, clist->put, clist->get); if (req->flags & CMD_REQ_COMMIT) { if (!ctrl->cmdlist_commit) pr_err("cmdlist_commit not implemented!\n"); else ret = ctrl->cmdlist_commit(ctrl, 0); } mutex_unlock(&ctrl->cmd_mutex); return ret; }
gpl-2.0