repo_name
string
path
string
copies
string
size
string
content
string
license
string
astrofimov/limbo-android
jni/qemu/roms/ipxe/src/core/gdbudp.c
90
7225
/* * Copyright (C) 2008 Stefan Hajnoczi <stefanha@gmail.com>. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ FILE_LICENCE ( GPL2_OR_LATER ); #include <stdio.h> #include <string.h> #include <byteswap.h> #include <ipxe/iobuf.h> #include <ipxe/in.h> #include <ipxe/if_arp.h> #include <ipxe/if_ether.h> #include <ipxe/ip.h> #include <ipxe/udp.h> #include <ipxe/netdevice.h> #include <ipxe/nap.h> #include <ipxe/gdbstub.h> #include <ipxe/gdbudp.h> /** @file * * GDB over UDP transport * */ enum { DEFAULT_PORT = 43770, /* UDP listen port */ }; struct gdb_transport udp_gdb_transport __gdb_transport; static struct net_device *netdev; static uint8_t dest_eth[ETH_ALEN]; static struct sockaddr_in dest_addr; static struct sockaddr_in source_addr; static void gdbudp_ensure_netdev_open ( struct net_device *netdev ) { /* The device may have been closed between breakpoints */ assert ( netdev ); netdev_open ( netdev ); /* Strictly speaking, we may need to close the device when leaving the interrupt handler */ } static size_t gdbudp_recv ( char *buf, size_t len ) { struct io_buffer *iob; struct ethhdr *ethhdr; struct arphdr *arphdr; struct iphdr *iphdr; struct udp_header *udphdr; size_t payload_len; gdbudp_ensure_netdev_open ( netdev ); for ( ; ; ) { netdev_poll ( netdev ); while ( ( iob = netdev_rx_dequeue ( netdev ) ) != NULL ) { /* Ethernet header */ if ( iob_len ( iob ) < 
sizeof ( *ethhdr ) ) { goto bad_packet; } ethhdr = iob->data; iob_pull ( iob, sizeof ( *ethhdr ) ); /* Handle ARP requests so the client can find our MAC */ if ( ethhdr->h_protocol == htons ( ETH_P_ARP ) ) { arphdr = iob->data; if ( iob_len ( iob ) < sizeof ( *arphdr ) + 2 * ( ETH_ALEN + sizeof ( struct in_addr ) ) || arphdr->ar_hrd != htons ( ARPHRD_ETHER ) || arphdr->ar_pro != htons ( ETH_P_IP ) || arphdr->ar_hln != ETH_ALEN || arphdr->ar_pln != sizeof ( struct in_addr ) || arphdr->ar_op != htons ( ARPOP_REQUEST ) || * ( uint32_t * ) arp_target_pa ( arphdr ) != source_addr.sin_addr.s_addr ) { goto bad_packet; } /* Generate an ARP reply */ arphdr->ar_op = htons ( ARPOP_REPLY ); memswap ( arp_sender_pa ( arphdr ), arp_target_pa ( arphdr ), sizeof ( struct in_addr ) ); memcpy ( arp_target_ha ( arphdr ), arp_sender_ha ( arphdr ), ETH_ALEN ); memcpy ( arp_sender_ha ( arphdr ), netdev->ll_addr, ETH_ALEN ); /* Fix up ethernet header */ ethhdr = iob_push ( iob, sizeof ( *ethhdr ) ); memcpy ( ethhdr->h_dest, ethhdr->h_source, ETH_ALEN ); memcpy ( ethhdr->h_source, netdev->ll_addr, ETH_ALEN ); netdev_tx ( netdev, iob ); continue; /* no need to free iob */ } if ( ethhdr->h_protocol != htons ( ETH_P_IP ) ) { goto bad_packet; } /* IP header */ if ( iob_len ( iob ) < sizeof ( *iphdr ) ) { goto bad_packet; } iphdr = iob->data; iob_pull ( iob, sizeof ( *iphdr ) ); if ( iphdr->protocol != IP_UDP || iphdr->dest.s_addr != source_addr.sin_addr.s_addr ) { goto bad_packet; } /* UDP header */ if ( iob_len ( iob ) < sizeof ( *udphdr ) ) { goto bad_packet; } udphdr = iob->data; if ( udphdr->dest != source_addr.sin_port ) { goto bad_packet; } /* Learn the remote connection details */ memcpy ( dest_eth, ethhdr->h_source, ETH_ALEN ); dest_addr.sin_addr.s_addr = iphdr->src.s_addr; dest_addr.sin_port = udphdr->src; /* Payload */ payload_len = ntohs ( udphdr->len ); if ( payload_len < sizeof ( *udphdr ) || payload_len > iob_len ( iob ) ) { goto bad_packet; } payload_len -= sizeof ( *udphdr ); 
iob_pull ( iob, sizeof ( *udphdr ) ); if ( payload_len > len ) { goto bad_packet; } memcpy ( buf, iob->data, payload_len ); free_iob ( iob ); return payload_len; bad_packet: free_iob ( iob ); } cpu_nap(); } } static void gdbudp_send ( const char *buf, size_t len ) { struct io_buffer *iob; struct ethhdr *ethhdr; struct iphdr *iphdr; struct udp_header *udphdr; /* Check that we are connected */ if ( dest_addr.sin_port == 0 ) { return; } gdbudp_ensure_netdev_open ( netdev ); iob = alloc_iob ( sizeof ( *ethhdr ) + sizeof ( *iphdr ) + sizeof ( *udphdr ) + len ); if ( !iob ) { return; } /* Payload */ iob_reserve ( iob, sizeof ( *ethhdr ) + sizeof ( *iphdr ) + sizeof ( *udphdr ) ); memcpy ( iob_put ( iob, len ), buf, len ); /* UDP header */ udphdr = iob_push ( iob, sizeof ( *udphdr ) ); udphdr->src = source_addr.sin_port; udphdr->dest = dest_addr.sin_port; udphdr->len = htons ( iob_len ( iob ) ); udphdr->chksum = 0; /* optional and we are not using it */ /* IP header */ iphdr = iob_push ( iob, sizeof ( *iphdr ) ); memset ( iphdr, 0, sizeof ( *iphdr ) ); iphdr->verhdrlen = ( IP_VER | ( sizeof ( *iphdr ) / 4 ) ); iphdr->service = IP_TOS; iphdr->len = htons ( iob_len ( iob ) ); iphdr->ttl = IP_TTL; iphdr->protocol = IP_UDP; iphdr->dest.s_addr = dest_addr.sin_addr.s_addr; iphdr->src.s_addr = source_addr.sin_addr.s_addr; iphdr->chksum = tcpip_chksum ( iphdr, sizeof ( *iphdr ) ); /* Ethernet header */ ethhdr = iob_push ( iob, sizeof ( *ethhdr ) ); memcpy ( ethhdr->h_dest, dest_eth, ETH_ALEN ); memcpy ( ethhdr->h_source, netdev->ll_addr, ETH_ALEN ); ethhdr->h_protocol = htons ( ETH_P_IP ); netdev_tx ( netdev, iob ); } struct gdb_transport *gdbudp_configure ( const char *name, struct sockaddr_in *addr ) { struct settings *settings; /* Release old network device */ netdev_put ( netdev ); netdev = find_netdev ( name ); if ( !netdev ) { return NULL; } /* Hold network device */ netdev_get ( netdev ); /* Source UDP port */ source_addr.sin_port = ( addr && addr->sin_port ) ? 
addr->sin_port : htons ( DEFAULT_PORT ); /* Source IP address */ if ( addr && addr->sin_addr.s_addr ) { source_addr.sin_addr.s_addr = addr->sin_addr.s_addr; } else { settings = netdev_settings ( netdev ); fetch_ipv4_setting ( settings, &ip_setting, &source_addr.sin_addr ); if ( source_addr.sin_addr.s_addr == 0 ) { netdev_put ( netdev ); netdev = NULL; return NULL; } } return &udp_gdb_transport; } static int gdbudp_init ( int argc, char **argv ) { if ( argc != 1 ) { printf ( "udp: missing <interface> argument\n" ); return 1; } if ( !gdbudp_configure ( argv[0], NULL ) ) { printf ( "%s: device does not exist or has no IP address\n", argv[0] ); return 1; } return 0; } struct gdb_transport udp_gdb_transport __gdb_transport = { .name = "udp", .init = gdbudp_init, .send = gdbudp_send, .recv = gdbudp_recv, };
gpl-2.0
Napstar-xda/Huawei-Ideos-X6-Kernel
fs/notify/inotify/inotify.c
602
26509
/* * fs/inotify.c - inode-based file event notifications * * Authors: * John McCutchan <ttb@tentacle.dhs.org> * Robert Love <rml@novell.com> * * Kernel API added by: Amy Griffis <amy.griffis@hp.com> * * Copyright (C) 2005 John McCutchan * Copyright 2006 Hewlett-Packard Development Company, L.P. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/idr.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/list.h> #include <linux/writeback.h> #include <linux/inotify.h> #include <linux/fsnotify_backend.h> static atomic_t inotify_cookie; /* * Lock ordering: * * dentry->d_lock (used to keep d_move() away from dentry->d_parent) * iprune_mutex (synchronize shrink_icache_memory()) * inode_lock (protects the super_block->s_inodes list) * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) * inotify_handle->mutex (protects inotify_handle and watches->h_list) * * The inode->inotify_mutex and inotify_handle->mutex and held during execution * of a caller's event handler. Thus, the caller must not hold any locks * taken in their event handler while calling any of the published inotify * interfaces. */ /* * Lifetimes of the three main data structures--inotify_handle, inode, and * inotify_watch--are managed by reference count. * * inotify_handle: Lifetime is from inotify_init() to inotify_destroy(). 
* Additional references can bump the count via get_inotify_handle() and drop * the count via put_inotify_handle(). * * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch() * to remove_watch_no_event(). Additional references can bump the count via * get_inotify_watch() and drop the count via put_inotify_watch(). The caller * is reponsible for the final put after receiving IN_IGNORED, or when using * IN_ONESHOT after receiving the first event. Inotify does the final put if * inotify_destroy() is called. * * inode: Pinned so long as the inode is associated with a watch, from * inotify_add_watch() to the final put_inotify_watch(). */ /* * struct inotify_handle - represents an inotify instance * * This structure is protected by the mutex 'mutex'. */ struct inotify_handle { struct idr idr; /* idr mapping wd -> watch */ struct mutex mutex; /* protects this bad boy */ struct list_head watches; /* list of watches */ atomic_t count; /* reference count */ u32 last_wd; /* the last wd allocated */ const struct inotify_operations *in_ops; /* inotify caller operations */ }; static inline void get_inotify_handle(struct inotify_handle *ih) { atomic_inc(&ih->count); } static inline void put_inotify_handle(struct inotify_handle *ih) { if (atomic_dec_and_test(&ih->count)) { idr_destroy(&ih->idr); kfree(ih); } } /** * get_inotify_watch - grab a reference to an inotify_watch * @watch: watch to grab */ void get_inotify_watch(struct inotify_watch *watch) { atomic_inc(&watch->count); } EXPORT_SYMBOL_GPL(get_inotify_watch); int pin_inotify_watch(struct inotify_watch *watch) { struct super_block *sb = watch->inode->i_sb; spin_lock(&sb_lock); if (sb->s_count >= S_BIAS) { atomic_inc(&sb->s_active); spin_unlock(&sb_lock); atomic_inc(&watch->count); return 1; } spin_unlock(&sb_lock); return 0; } /** * put_inotify_watch - decrements the ref count on a given watch. cleans up * watch references if the count reaches zero. 
inotify_watch is freed by * inotify callers via the destroy_watch() op. * @watch: watch to release */ void put_inotify_watch(struct inotify_watch *watch) { if (atomic_dec_and_test(&watch->count)) { struct inotify_handle *ih = watch->ih; iput(watch->inode); ih->in_ops->destroy_watch(watch); put_inotify_handle(ih); } } EXPORT_SYMBOL_GPL(put_inotify_watch); void unpin_inotify_watch(struct inotify_watch *watch) { struct super_block *sb = watch->inode->i_sb; put_inotify_watch(watch); deactivate_super(sb); } /* * inotify_handle_get_wd - returns the next WD for use by the given handle * * Callers must hold ih->mutex. This function can sleep. */ static int inotify_handle_get_wd(struct inotify_handle *ih, struct inotify_watch *watch) { int ret; do { if (unlikely(!idr_pre_get(&ih->idr, GFP_NOFS))) return -ENOSPC; ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd); } while (ret == -EAGAIN); if (likely(!ret)) ih->last_wd = watch->wd; return ret; } /* * inotify_inode_watched - returns nonzero if there are watches on this inode * and zero otherwise. We call this lockless, we do not care if we race. */ static inline int inotify_inode_watched(struct inode *inode) { return !list_empty(&inode->inotify_watches); } /* * Get child dentry flag into synch with parent inode. * Flag should always be clear for negative dentrys. */ static void set_dentry_child_flags(struct inode *inode, int watched) { struct dentry *alias; spin_lock(&dcache_lock); list_for_each_entry(alias, &inode->i_dentry, d_alias) { struct dentry *child; list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { if (!child->d_inode) continue; spin_lock(&child->d_lock); if (watched) child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; else child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED; spin_unlock(&child->d_lock); } } spin_unlock(&dcache_lock); } /* * inotify_find_handle - find the watch associated with the given inode and * handle * * Callers must hold inode->inotify_mutex. 
*/ static struct inotify_watch *inode_find_handle(struct inode *inode, struct inotify_handle *ih) { struct inotify_watch *watch; list_for_each_entry(watch, &inode->inotify_watches, i_list) { if (watch->ih == ih) return watch; } return NULL; } /* * remove_watch_no_event - remove watch without the IN_IGNORED event. * * Callers must hold both inode->inotify_mutex and ih->mutex. */ static void remove_watch_no_event(struct inotify_watch *watch, struct inotify_handle *ih) { list_del(&watch->i_list); list_del(&watch->h_list); if (!inotify_inode_watched(watch->inode)) set_dentry_child_flags(watch->inode, 0); idr_remove(&ih->idr, watch->wd); } /** * inotify_remove_watch_locked - Remove a watch from both the handle and the * inode. Sends the IN_IGNORED event signifying that the inode is no longer * watched. May be invoked from a caller's event handler. * @ih: inotify handle associated with watch * @watch: watch to remove * * Callers must hold both inode->inotify_mutex and ih->mutex. */ void inotify_remove_watch_locked(struct inotify_handle *ih, struct inotify_watch *watch) { remove_watch_no_event(watch, ih); ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL); } EXPORT_SYMBOL_GPL(inotify_remove_watch_locked); /* Kernel API for producing events */ /* * inotify_d_instantiate - instantiate dcache entry for inode */ void inotify_d_instantiate(struct dentry *entry, struct inode *inode) { struct dentry *parent; if (!inode) return; spin_lock(&entry->d_lock); parent = entry->d_parent; if (parent->d_inode && inotify_inode_watched(parent->d_inode)) entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; spin_unlock(&entry->d_lock); } /* * inotify_d_move - dcache entry has been moved */ void inotify_d_move(struct dentry *entry) { struct dentry *parent; parent = entry->d_parent; if (inotify_inode_watched(parent->d_inode)) entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; else entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED; } /** * inotify_inode_queue_event - queue an event 
to all watches on this inode * @inode: inode event is originating from * @mask: event mask describing this event * @cookie: cookie for synchronization, or zero * @name: filename, if any * @n_inode: inode associated with name */ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, const char *name, struct inode *n_inode) { struct inotify_watch *watch, *next; if (!inotify_inode_watched(inode)) return; mutex_lock(&inode->inotify_mutex); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { u32 watch_mask = watch->mask; if (watch_mask & mask) { struct inotify_handle *ih= watch->ih; mutex_lock(&ih->mutex); if (watch_mask & IN_ONESHOT) remove_watch_no_event(watch, ih); ih->in_ops->handle_event(watch, watch->wd, mask, cookie, name, n_inode); mutex_unlock(&ih->mutex); } } mutex_unlock(&inode->inotify_mutex); } EXPORT_SYMBOL_GPL(inotify_inode_queue_event); /** * inotify_dentry_parent_queue_event - queue an event to a dentry's parent * @dentry: the dentry in question, we queue against this dentry's parent * @mask: event mask describing this event * @cookie: cookie for synchronization, or zero * @name: filename, if any */ void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask, u32 cookie, const char *name) { struct dentry *parent; struct inode *inode; if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED)) return; spin_lock(&dentry->d_lock); parent = dentry->d_parent; inode = parent->d_inode; if (inotify_inode_watched(inode)) { dget(parent); spin_unlock(&dentry->d_lock); inotify_inode_queue_event(inode, mask, cookie, name, dentry->d_inode); dput(parent); } else spin_unlock(&dentry->d_lock); } EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event); /** * inotify_get_cookie - return a unique cookie for use in synchronizing events. */ u32 inotify_get_cookie(void) { return atomic_inc_return(&inotify_cookie); } EXPORT_SYMBOL_GPL(inotify_get_cookie); /** * inotify_unmount_inodes - an sb is unmounting. handle any watched inodes. 
* @list: list of inodes being unmounted (sb->s_inodes) * * Called with inode_lock held, protecting the unmounting super block's list * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. * We temporarily drop inode_lock, however, and CAN block. */ void inotify_unmount_inodes(struct list_head *list) { struct inode *inode, *next_i, *need_iput = NULL; list_for_each_entry_safe(inode, next_i, list, i_sb_list) { struct inotify_watch *watch, *next_w; struct inode *need_iput_tmp; struct list_head *watches; /* * We cannot __iget() an inode in state I_CLEAR, I_FREEING, * I_WILL_FREE, or I_NEW which is fine because by that point * the inode cannot have any associated watches. */ if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW)) continue; /* * If i_count is zero, the inode cannot have any watches and * doing an __iget/iput with MS_ACTIVE clear would actually * evict all inodes with zero i_count from icache which is * unnecessarily violent and may in fact be illegal to do. */ if (!atomic_read(&inode->i_count)) continue; need_iput_tmp = need_iput; need_iput = NULL; /* In case inotify_remove_watch_locked() drops a reference. */ if (inode != need_iput_tmp) __iget(inode); else need_iput_tmp = NULL; /* In case the dropping of a reference would nuke next_i. */ if ((&next_i->i_sb_list != list) && atomic_read(&next_i->i_count) && !(next_i->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))) { __iget(next_i); need_iput = next_i; } /* * We can safely drop inode_lock here because we hold * references on both inode and next_i. Also no new inodes * will be added since the umount has begun. Finally, * iprune_mutex keeps shrink_icache_memory() away. 
*/ spin_unlock(&inode_lock); if (need_iput_tmp) iput(need_iput_tmp); /* for each watch, send IN_UNMOUNT and then remove it */ mutex_lock(&inode->inotify_mutex); watches = &inode->inotify_watches; list_for_each_entry_safe(watch, next_w, watches, i_list) { struct inotify_handle *ih= watch->ih; get_inotify_watch(watch); mutex_lock(&ih->mutex); ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0, NULL, NULL); inotify_remove_watch_locked(ih, watch); mutex_unlock(&ih->mutex); put_inotify_watch(watch); } mutex_unlock(&inode->inotify_mutex); iput(inode); spin_lock(&inode_lock); } } EXPORT_SYMBOL_GPL(inotify_unmount_inodes); /** * inotify_inode_is_dead - an inode has been deleted, cleanup any watches * @inode: inode that is about to be removed */ void inotify_inode_is_dead(struct inode *inode) { struct inotify_watch *watch, *next; mutex_lock(&inode->inotify_mutex); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { struct inotify_handle *ih = watch->ih; mutex_lock(&ih->mutex); inotify_remove_watch_locked(ih, watch); mutex_unlock(&ih->mutex); } mutex_unlock(&inode->inotify_mutex); } EXPORT_SYMBOL_GPL(inotify_inode_is_dead); /* Kernel Consumer API */ /** * inotify_init - allocate and initialize an inotify instance * @ops: caller's inotify operations */ struct inotify_handle *inotify_init(const struct inotify_operations *ops) { struct inotify_handle *ih; ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL); if (unlikely(!ih)) return ERR_PTR(-ENOMEM); idr_init(&ih->idr); INIT_LIST_HEAD(&ih->watches); mutex_init(&ih->mutex); ih->last_wd = 0; ih->in_ops = ops; atomic_set(&ih->count, 0); get_inotify_handle(ih); return ih; } EXPORT_SYMBOL_GPL(inotify_init); /** * inotify_init_watch - initialize an inotify watch * @watch: watch to initialize */ void inotify_init_watch(struct inotify_watch *watch) { INIT_LIST_HEAD(&watch->h_list); INIT_LIST_HEAD(&watch->i_list); atomic_set(&watch->count, 0); get_inotify_watch(watch); /* initial get */ } 
EXPORT_SYMBOL_GPL(inotify_init_watch); /* * Watch removals suck violently. To kick the watch out we need (in this * order) inode->inotify_mutex and ih->mutex. That's fine if we have * a hold on inode; however, for all other cases we need to make damn sure * we don't race with umount. We can *NOT* just grab a reference to a * watch - inotify_unmount_inodes() will happily sail past it and we'll end * with reference to inode potentially outliving its superblock. Ideally * we just want to grab an active reference to superblock if we can; that * will make sure we won't go into inotify_umount_inodes() until we are * done. Cleanup is just deactivate_super(). However, that leaves a messy * case - what if we *are* racing with umount() and active references to * superblock can't be acquired anymore? We can bump ->s_count, grab * ->s_umount, which will almost certainly wait until the superblock is shut * down and the watch in question is pining for fjords. That's fine, but * there is a problem - we might have hit the window between ->s_active * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock * is past the point of no return and is heading for shutdown) and the * moment when deactivate_super() acquires ->s_umount. We could just do * drop_super() yield() and retry, but that's rather antisocial and this * stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having * found that we'd got there first (i.e. that ->s_root is non-NULL) we know * that we won't race with inotify_umount_inodes(). So we could grab a * reference to watch and do the rest as above, just with drop_super() instead * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we * could grab ->s_umount. So the watch could've been gone already. * * That still can be dealt with - we need to save watch->wd, do idr_find() * and compare its result with our pointer. 
If they match, we either have * the damn thing still alive or we'd lost not one but two races at once, * the watch had been killed and a new one got created with the same ->wd * at the same address. That couldn't have happened in inotify_destroy(), * but inotify_rm_wd() could run into that. Still, "new one got created" * is not a problem - we have every right to kill it or leave it alone, * whatever's more convenient. * * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as * "grab it and kill it" check. If it's been our original watch, we are * fine, if it's a newcomer - nevermind, just pretend that we'd won the * race and kill the fscker anyway; we are safe since we know that its * superblock won't be going away. * * And yes, this is far beyond mere "not very pretty"; so's the entire * concept of inotify to start with. */ /** * pin_to_kill - pin the watch down for removal * @ih: inotify handle * @watch: watch to kill * * Called with ih->mutex held, drops it. Possible return values: * 0 - nothing to do, it has died * 1 - remove it, drop the reference and deactivate_super() * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid * that variant, since it involved a lot of PITA, but that's the best that * could've been done. 
*/ static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch) { struct super_block *sb = watch->inode->i_sb; s32 wd = watch->wd; spin_lock(&sb_lock); if (sb->s_count >= S_BIAS) { atomic_inc(&sb->s_active); spin_unlock(&sb_lock); get_inotify_watch(watch); mutex_unlock(&ih->mutex); return 1; /* the best outcome */ } sb->s_count++; spin_unlock(&sb_lock); mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */ down_read(&sb->s_umount); if (likely(!sb->s_root)) { /* fs is already shut down; the watch is dead */ drop_super(sb); return 0; } /* raced with the final deactivate_super() */ mutex_lock(&ih->mutex); if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) { /* the watch is dead */ mutex_unlock(&ih->mutex); drop_super(sb); return 0; } /* still alive or freed and reused with the same sb and wd; kill */ get_inotify_watch(watch); mutex_unlock(&ih->mutex); return 2; } static void unpin_and_kill(struct inotify_watch *watch, int how) { struct super_block *sb = watch->inode->i_sb; put_inotify_watch(watch); switch (how) { case 1: deactivate_super(sb); break; case 2: drop_super(sb); } } /** * inotify_destroy - clean up and destroy an inotify instance * @ih: inotify handle */ void inotify_destroy(struct inotify_handle *ih) { /* * Destroy all of the watches for this handle. Unfortunately, not very * pretty. We cannot do a simple iteration over the list, because we * do not know the inode until we iterate to the watch. But we need to * hold inode->inotify_mutex before ih->mutex. The following works. 
* * AV: it had to become even uglier to start working ;-/ */ while (1) { struct inotify_watch *watch; struct list_head *watches; struct super_block *sb; struct inode *inode; int how; mutex_lock(&ih->mutex); watches = &ih->watches; if (list_empty(watches)) { mutex_unlock(&ih->mutex); break; } watch = list_first_entry(watches, struct inotify_watch, h_list); sb = watch->inode->i_sb; how = pin_to_kill(ih, watch); if (!how) continue; inode = watch->inode; mutex_lock(&inode->inotify_mutex); mutex_lock(&ih->mutex); /* make sure we didn't race with another list removal */ if (likely(idr_find(&ih->idr, watch->wd))) { remove_watch_no_event(watch, ih); put_inotify_watch(watch); } mutex_unlock(&ih->mutex); mutex_unlock(&inode->inotify_mutex); unpin_and_kill(watch, how); } /* free this handle: the put matching the get in inotify_init() */ put_inotify_handle(ih); } EXPORT_SYMBOL_GPL(inotify_destroy); /** * inotify_find_watch - find an existing watch for an (ih,inode) pair * @ih: inotify handle * @inode: inode to watch * @watchp: pointer to existing inotify_watch * * Caller must pin given inode (via nameidata). */ s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, struct inotify_watch **watchp) { struct inotify_watch *old; int ret = -ENOENT; mutex_lock(&inode->inotify_mutex); mutex_lock(&ih->mutex); old = inode_find_handle(inode, ih); if (unlikely(old)) { get_inotify_watch(old); /* caller must put watch */ *watchp = old; ret = old->wd; } mutex_unlock(&ih->mutex); mutex_unlock(&inode->inotify_mutex); return ret; } EXPORT_SYMBOL_GPL(inotify_find_watch); /** * inotify_find_update_watch - find and update the mask of an existing watch * @ih: inotify handle * @inode: inode's watch to update * @mask: mask of events to watch * * Caller must pin given inode (via nameidata). 
*/ s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode, u32 mask) { struct inotify_watch *old; int mask_add = 0; int ret; if (mask & IN_MASK_ADD) mask_add = 1; /* don't allow invalid bits: we don't want flags set */ mask &= IN_ALL_EVENTS | IN_ONESHOT; if (unlikely(!mask)) return -EINVAL; mutex_lock(&inode->inotify_mutex); mutex_lock(&ih->mutex); /* * Handle the case of re-adding a watch on an (inode,ih) pair that we * are already watching. We just update the mask and return its wd. */ old = inode_find_handle(inode, ih); if (unlikely(!old)) { ret = -ENOENT; goto out; } if (mask_add) old->mask |= mask; else old->mask = mask; ret = old->wd; out: mutex_unlock(&ih->mutex); mutex_unlock(&inode->inotify_mutex); return ret; } EXPORT_SYMBOL_GPL(inotify_find_update_watch); /** * inotify_add_watch - add a watch to an inotify instance * @ih: inotify handle * @watch: caller allocated watch structure * @inode: inode to watch * @mask: mask of events to watch * * Caller must pin given inode (via nameidata). * Caller must ensure it only calls inotify_add_watch() once per watch. * Calls inotify_handle_get_wd() so may sleep. */ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, struct inode *inode, u32 mask) { int ret = 0; int newly_watched; /* don't allow invalid bits: we don't want flags set */ mask &= IN_ALL_EVENTS | IN_ONESHOT; if (unlikely(!mask)) return -EINVAL; watch->mask = mask; mutex_lock(&inode->inotify_mutex); mutex_lock(&ih->mutex); /* Initialize a new watch */ ret = inotify_handle_get_wd(ih, watch); if (unlikely(ret)) goto out; ret = watch->wd; /* save a reference to handle and bump the count to make it official */ get_inotify_handle(ih); watch->ih = ih; /* * Save a reference to the inode and bump the ref count to make it * official. We hold a reference to nameidata, which makes this safe. 
*/ watch->inode = igrab(inode); /* Add the watch to the handle's and the inode's list */ newly_watched = !inotify_inode_watched(inode); list_add(&watch->h_list, &ih->watches); list_add(&watch->i_list, &inode->inotify_watches); /* * Set child flags _after_ adding the watch, so there is no race * windows where newly instantiated children could miss their parent's * watched flag. */ if (newly_watched) set_dentry_child_flags(inode, 1); out: mutex_unlock(&ih->mutex); mutex_unlock(&inode->inotify_mutex); return ret; } EXPORT_SYMBOL_GPL(inotify_add_watch); /** * inotify_clone_watch - put the watch next to existing one * @old: already installed watch * @new: new watch * * Caller must hold the inotify_mutex of inode we are dealing with; * it is expected to remove the old watch before unlocking the inode. */ s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new) { struct inotify_handle *ih = old->ih; int ret = 0; new->mask = old->mask; new->ih = ih; mutex_lock(&ih->mutex); /* Initialize a new watch */ ret = inotify_handle_get_wd(ih, new); if (unlikely(ret)) goto out; ret = new->wd; get_inotify_handle(ih); new->inode = igrab(old->inode); list_add(&new->h_list, &ih->watches); list_add(&new->i_list, &old->inode->inotify_watches); out: mutex_unlock(&ih->mutex); return ret; } void inotify_evict_watch(struct inotify_watch *watch) { get_inotify_watch(watch); mutex_lock(&watch->ih->mutex); inotify_remove_watch_locked(watch->ih, watch); mutex_unlock(&watch->ih->mutex); } /** * inotify_rm_wd - remove a watch from an inotify instance * @ih: inotify handle * @wd: watch descriptor to remove * * Can sleep. 
*/ int inotify_rm_wd(struct inotify_handle *ih, u32 wd) { struct inotify_watch *watch; struct super_block *sb; struct inode *inode; int how; mutex_lock(&ih->mutex); watch = idr_find(&ih->idr, wd); if (unlikely(!watch)) { mutex_unlock(&ih->mutex); return -EINVAL; } sb = watch->inode->i_sb; how = pin_to_kill(ih, watch); if (!how) return 0; inode = watch->inode; mutex_lock(&inode->inotify_mutex); mutex_lock(&ih->mutex); /* make sure that we did not race */ if (likely(idr_find(&ih->idr, wd) == watch)) inotify_remove_watch_locked(ih, watch); mutex_unlock(&ih->mutex); mutex_unlock(&inode->inotify_mutex); unpin_and_kill(watch, how); return 0; } EXPORT_SYMBOL_GPL(inotify_rm_wd); /** * inotify_rm_watch - remove a watch from an inotify instance * @ih: inotify handle * @watch: watch to remove * * Can sleep. */ int inotify_rm_watch(struct inotify_handle *ih, struct inotify_watch *watch) { return inotify_rm_wd(ih, watch->wd); } EXPORT_SYMBOL_GPL(inotify_rm_watch); /* * inotify_setup - core initialization function */ static int __init inotify_setup(void) { BUILD_BUG_ON(IN_ACCESS != FS_ACCESS); BUILD_BUG_ON(IN_MODIFY != FS_MODIFY); BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB); BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE); BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE); BUILD_BUG_ON(IN_OPEN != FS_OPEN); BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM); BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO); BUILD_BUG_ON(IN_CREATE != FS_CREATE); BUILD_BUG_ON(IN_DELETE != FS_DELETE); BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF); BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF); BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW); BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT); BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR); BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED); BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT); atomic_set(&inotify_cookie, 0); return 0; } module_init(inotify_setup);
gpl-2.0
wikimedia/operations-debs-linux
arch/arm/kernel/probes-thumb.c
602
32363
/* * arch/arm/kernel/probes-thumb.c * * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/module.h> #include "probes.h" #include "probes-thumb.h" static const union decode_item t32_table_1110_100x_x0xx[] = { /* Load/store multiple instructions */ /* Rn is PC 1110 100x x0xx 1111 xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xfe4f0000, 0xe80f0000), /* SRS 1110 1000 00x0 xxxx xxxx xxxx xxxx xxxx */ /* RFE 1110 1000 00x1 xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xffc00000, 0xe8000000), /* SRS 1110 1001 10x0 xxxx xxxx xxxx xxxx xxxx */ /* RFE 1110 1001 10x1 xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xffc00000, 0xe9800000), /* STM Rn, {...pc} 1110 100x x0x0 xxxx 1xxx xxxx xxxx xxxx */ DECODE_REJECT (0xfe508000, 0xe8008000), /* LDM Rn, {...lr,pc} 1110 100x x0x1 xxxx 11xx xxxx xxxx xxxx */ DECODE_REJECT (0xfe50c000, 0xe810c000), /* LDM/STM Rn, {...sp} 1110 100x x0xx xxxx xx1x xxxx xxxx xxxx */ DECODE_REJECT (0xfe402000, 0xe8002000), /* STMIA 1110 1000 10x0 xxxx xxxx xxxx xxxx xxxx */ /* LDMIA 1110 1000 10x1 xxxx xxxx xxxx xxxx xxxx */ /* STMDB 1110 1001 00x0 xxxx xxxx xxxx xxxx xxxx */ /* LDMDB 1110 1001 00x1 xxxx xxxx xxxx xxxx xxxx */ DECODE_CUSTOM (0xfe400000, 0xe8000000, PROBES_T32_LDMSTM), DECODE_END }; static const union decode_item t32_table_1110_100x_x1xx[] = { /* Load/store dual, load/store exclusive, table branch */ /* STRD (immediate) 1110 1000 x110 xxxx xxxx xxxx xxxx xxxx */ /* LDRD (immediate) 1110 1000 x111 xxxx xxxx xxxx xxxx xxxx */ DECODE_OR (0xff600000, 0xe8600000), /* STRD (immediate) 1110 1001 x1x0 xxxx xxxx xxxx xxxx xxxx */ /* LDRD (immediate) 1110 1001 x1x1 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xff400000, 0xe9400000, PROBES_T32_LDRDSTRD, REGS(NOPCWB, NOSPPC, NOSPPC, 0, 0)), /* TBB 1110 1000 
1101 xxxx xxxx xxxx 0000 xxxx */ /* TBH 1110 1000 1101 xxxx xxxx xxxx 0001 xxxx */ DECODE_SIMULATEX(0xfff000e0, 0xe8d00000, PROBES_T32_TABLE_BRANCH, REGS(NOSP, 0, 0, 0, NOSPPC)), /* STREX 1110 1000 0100 xxxx xxxx xxxx xxxx xxxx */ /* LDREX 1110 1000 0101 xxxx xxxx xxxx xxxx xxxx */ /* STREXB 1110 1000 1100 xxxx xxxx xxxx 0100 xxxx */ /* STREXH 1110 1000 1100 xxxx xxxx xxxx 0101 xxxx */ /* STREXD 1110 1000 1100 xxxx xxxx xxxx 0111 xxxx */ /* LDREXB 1110 1000 1101 xxxx xxxx xxxx 0100 xxxx */ /* LDREXH 1110 1000 1101 xxxx xxxx xxxx 0101 xxxx */ /* LDREXD 1110 1000 1101 xxxx xxxx xxxx 0111 xxxx */ /* And unallocated instructions... */ DECODE_END }; static const union decode_item t32_table_1110_101x[] = { /* Data-processing (shifted register) */ /* TST 1110 1010 0001 xxxx xxxx 1111 xxxx xxxx */ /* TEQ 1110 1010 1001 xxxx xxxx 1111 xxxx xxxx */ DECODE_EMULATEX (0xff700f00, 0xea100f00, PROBES_T32_TST, REGS(NOSPPC, 0, 0, 0, NOSPPC)), /* CMN 1110 1011 0001 xxxx xxxx 1111 xxxx xxxx */ DECODE_OR (0xfff00f00, 0xeb100f00), /* CMP 1110 1011 1011 xxxx xxxx 1111 xxxx xxxx */ DECODE_EMULATEX (0xfff00f00, 0xebb00f00, PROBES_T32_TST, REGS(NOPC, 0, 0, 0, NOSPPC)), /* MOV 1110 1010 010x 1111 xxxx xxxx xxxx xxxx */ /* MVN 1110 1010 011x 1111 xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xffcf0000, 0xea4f0000, PROBES_T32_MOV, REGS(0, 0, NOSPPC, 0, NOSPPC)), /* ??? 1110 1010 101x xxxx xxxx xxxx xxxx xxxx */ /* ??? 1110 1010 111x xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xffa00000, 0xeaa00000), /* ??? 1110 1011 001x xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xffe00000, 0xeb200000), /* ??? 1110 1011 100x xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xffe00000, 0xeb800000), /* ??? 
1110 1011 111x xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xffe00000, 0xebe00000), /* ADD/SUB SP, SP, Rm, LSL #0..3 */ /* 1110 1011 x0xx 1101 x000 1101 xx00 xxxx */ DECODE_EMULATEX (0xff4f7f30, 0xeb0d0d00, PROBES_T32_ADDSUB, REGS(SP, 0, SP, 0, NOSPPC)), /* ADD/SUB SP, SP, Rm, shift */ /* 1110 1011 x0xx 1101 xxxx 1101 xxxx xxxx */ DECODE_REJECT (0xff4f0f00, 0xeb0d0d00), /* ADD/SUB Rd, SP, Rm, shift */ /* 1110 1011 x0xx 1101 xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xff4f0000, 0xeb0d0000, PROBES_T32_ADDSUB, REGS(SP, 0, NOPC, 0, NOSPPC)), /* AND 1110 1010 000x xxxx xxxx xxxx xxxx xxxx */ /* BIC 1110 1010 001x xxxx xxxx xxxx xxxx xxxx */ /* ORR 1110 1010 010x xxxx xxxx xxxx xxxx xxxx */ /* ORN 1110 1010 011x xxxx xxxx xxxx xxxx xxxx */ /* EOR 1110 1010 100x xxxx xxxx xxxx xxxx xxxx */ /* PKH 1110 1010 110x xxxx xxxx xxxx xxxx xxxx */ /* ADD 1110 1011 000x xxxx xxxx xxxx xxxx xxxx */ /* ADC 1110 1011 010x xxxx xxxx xxxx xxxx xxxx */ /* SBC 1110 1011 011x xxxx xxxx xxxx xxxx xxxx */ /* SUB 1110 1011 101x xxxx xxxx xxxx xxxx xxxx */ /* RSB 1110 1011 110x xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfe000000, 0xea000000, PROBES_T32_LOGICAL, REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)), DECODE_END }; static const union decode_item t32_table_1111_0x0x___0[] = { /* Data-processing (modified immediate) */ /* TST 1111 0x00 0001 xxxx 0xxx 1111 xxxx xxxx */ /* TEQ 1111 0x00 1001 xxxx 0xxx 1111 xxxx xxxx */ DECODE_EMULATEX (0xfb708f00, 0xf0100f00, PROBES_T32_TST, REGS(NOSPPC, 0, 0, 0, 0)), /* CMN 1111 0x01 0001 xxxx 0xxx 1111 xxxx xxxx */ DECODE_OR (0xfbf08f00, 0xf1100f00), /* CMP 1111 0x01 1011 xxxx 0xxx 1111 xxxx xxxx */ DECODE_EMULATEX (0xfbf08f00, 0xf1b00f00, PROBES_T32_CMP, REGS(NOPC, 0, 0, 0, 0)), /* MOV 1111 0x00 010x 1111 0xxx xxxx xxxx xxxx */ /* MVN 1111 0x00 011x 1111 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfbcf8000, 0xf04f0000, PROBES_T32_MOV, REGS(0, 0, NOSPPC, 0, 0)), /* ??? 1111 0x00 101x xxxx 0xxx xxxx xxxx xxxx */ DECODE_REJECT (0xfbe08000, 0xf0a00000), /* ??? 
1111 0x00 110x xxxx 0xxx xxxx xxxx xxxx */ /* ??? 1111 0x00 111x xxxx 0xxx xxxx xxxx xxxx */ DECODE_REJECT (0xfbc08000, 0xf0c00000), /* ??? 1111 0x01 001x xxxx 0xxx xxxx xxxx xxxx */ DECODE_REJECT (0xfbe08000, 0xf1200000), /* ??? 1111 0x01 100x xxxx 0xxx xxxx xxxx xxxx */ DECODE_REJECT (0xfbe08000, 0xf1800000), /* ??? 1111 0x01 111x xxxx 0xxx xxxx xxxx xxxx */ DECODE_REJECT (0xfbe08000, 0xf1e00000), /* ADD Rd, SP, #imm 1111 0x01 000x 1101 0xxx xxxx xxxx xxxx */ /* SUB Rd, SP, #imm 1111 0x01 101x 1101 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfb4f8000, 0xf10d0000, PROBES_T32_ADDSUB, REGS(SP, 0, NOPC, 0, 0)), /* AND 1111 0x00 000x xxxx 0xxx xxxx xxxx xxxx */ /* BIC 1111 0x00 001x xxxx 0xxx xxxx xxxx xxxx */ /* ORR 1111 0x00 010x xxxx 0xxx xxxx xxxx xxxx */ /* ORN 1111 0x00 011x xxxx 0xxx xxxx xxxx xxxx */ /* EOR 1111 0x00 100x xxxx 0xxx xxxx xxxx xxxx */ /* ADD 1111 0x01 000x xxxx 0xxx xxxx xxxx xxxx */ /* ADC 1111 0x01 010x xxxx 0xxx xxxx xxxx xxxx */ /* SBC 1111 0x01 011x xxxx 0xxx xxxx xxxx xxxx */ /* SUB 1111 0x01 101x xxxx 0xxx xxxx xxxx xxxx */ /* RSB 1111 0x01 110x xxxx 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfa008000, 0xf0000000, PROBES_T32_LOGICAL, REGS(NOSPPC, 0, NOSPPC, 0, 0)), DECODE_END }; static const union decode_item t32_table_1111_0x1x___0[] = { /* Data-processing (plain binary immediate) */ /* ADDW Rd, PC, #imm 1111 0x10 0000 1111 0xxx xxxx xxxx xxxx */ DECODE_OR (0xfbff8000, 0xf20f0000), /* SUBW Rd, PC, #imm 1111 0x10 1010 1111 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfbff8000, 0xf2af0000, PROBES_T32_ADDWSUBW_PC, REGS(PC, 0, NOSPPC, 0, 0)), /* ADDW SP, SP, #imm 1111 0x10 0000 1101 0xxx 1101 xxxx xxxx */ DECODE_OR (0xfbff8f00, 0xf20d0d00), /* SUBW SP, SP, #imm 1111 0x10 1010 1101 0xxx 1101 xxxx xxxx */ DECODE_EMULATEX (0xfbff8f00, 0xf2ad0d00, PROBES_T32_ADDWSUBW, REGS(SP, 0, SP, 0, 0)), /* ADDW 1111 0x10 0000 xxxx 0xxx xxxx xxxx xxxx */ DECODE_OR (0xfbf08000, 0xf2000000), /* SUBW 1111 0x10 1010 xxxx 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX 
(0xfbf08000, 0xf2a00000, PROBES_T32_ADDWSUBW, REGS(NOPCX, 0, NOSPPC, 0, 0)), /* MOVW 1111 0x10 0100 xxxx 0xxx xxxx xxxx xxxx */ /* MOVT 1111 0x10 1100 xxxx 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfb708000, 0xf2400000, PROBES_T32_MOVW, REGS(0, 0, NOSPPC, 0, 0)), /* SSAT16 1111 0x11 0010 xxxx 0000 xxxx 00xx xxxx */ /* SSAT 1111 0x11 00x0 xxxx 0xxx xxxx xxxx xxxx */ /* USAT16 1111 0x11 1010 xxxx 0000 xxxx 00xx xxxx */ /* USAT 1111 0x11 10x0 xxxx 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfb508000, 0xf3000000, PROBES_T32_SAT, REGS(NOSPPC, 0, NOSPPC, 0, 0)), /* SFBX 1111 0x11 0100 xxxx 0xxx xxxx xxxx xxxx */ /* UFBX 1111 0x11 1100 xxxx 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfb708000, 0xf3400000, PROBES_T32_BITFIELD, REGS(NOSPPC, 0, NOSPPC, 0, 0)), /* BFC 1111 0x11 0110 1111 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfbff8000, 0xf36f0000, PROBES_T32_BITFIELD, REGS(0, 0, NOSPPC, 0, 0)), /* BFI 1111 0x11 0110 xxxx 0xxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfbf08000, 0xf3600000, PROBES_T32_BITFIELD, REGS(NOSPPCX, 0, NOSPPC, 0, 0)), DECODE_END }; static const union decode_item t32_table_1111_0xxx___1[] = { /* Branches and miscellaneous control */ /* YIELD 1111 0011 1010 xxxx 10x0 x000 0000 0001 */ DECODE_OR (0xfff0d7ff, 0xf3a08001), /* SEV 1111 0011 1010 xxxx 10x0 x000 0000 0100 */ DECODE_EMULATE (0xfff0d7ff, 0xf3a08004, PROBES_T32_SEV), /* NOP 1111 0011 1010 xxxx 10x0 x000 0000 0000 */ /* WFE 1111 0011 1010 xxxx 10x0 x000 0000 0010 */ /* WFI 1111 0011 1010 xxxx 10x0 x000 0000 0011 */ DECODE_SIMULATE (0xfff0d7fc, 0xf3a08000, PROBES_T32_WFE), /* MRS Rd, CPSR 1111 0011 1110 xxxx 10x0 xxxx xxxx xxxx */ DECODE_SIMULATEX(0xfff0d000, 0xf3e08000, PROBES_T32_MRS, REGS(0, 0, NOSPPC, 0, 0)), /* * Unsupported instructions * 1111 0x11 1xxx xxxx 10x0 xxxx xxxx xxxx * * MSR 1111 0011 100x xxxx 10x0 xxxx xxxx xxxx * DBG hint 1111 0011 1010 xxxx 10x0 x000 1111 xxxx * Unallocated hints 1111 0011 1010 xxxx 10x0 x000 xxxx xxxx * CPS 1111 0011 1010 xxxx 10x0 xxxx xxxx xxxx * CLREX/DSB/DMB/ISB 
1111 0011 1011 xxxx 10x0 xxxx xxxx xxxx * BXJ 1111 0011 1100 xxxx 10x0 xxxx xxxx xxxx * SUBS PC,LR,#<imm8> 1111 0011 1101 xxxx 10x0 xxxx xxxx xxxx * MRS Rd, SPSR 1111 0011 1111 xxxx 10x0 xxxx xxxx xxxx * SMC 1111 0111 1111 xxxx 1000 xxxx xxxx xxxx * UNDEFINED 1111 0111 1111 xxxx 1010 xxxx xxxx xxxx * ??? 1111 0111 1xxx xxxx 1010 xxxx xxxx xxxx */ DECODE_REJECT (0xfb80d000, 0xf3808000), /* Bcc 1111 0xxx xxxx xxxx 10x0 xxxx xxxx xxxx */ DECODE_CUSTOM (0xf800d000, 0xf0008000, PROBES_T32_BRANCH_COND), /* BLX 1111 0xxx xxxx xxxx 11x0 xxxx xxxx xxx0 */ DECODE_OR (0xf800d001, 0xf000c000), /* B 1111 0xxx xxxx xxxx 10x1 xxxx xxxx xxxx */ /* BL 1111 0xxx xxxx xxxx 11x1 xxxx xxxx xxxx */ DECODE_SIMULATE (0xf8009000, 0xf0009000, PROBES_T32_BRANCH), DECODE_END }; static const union decode_item t32_table_1111_100x_x0x1__1111[] = { /* Memory hints */ /* PLD (literal) 1111 1000 x001 1111 1111 xxxx xxxx xxxx */ /* PLI (literal) 1111 1001 x001 1111 1111 xxxx xxxx xxxx */ DECODE_SIMULATE (0xfe7ff000, 0xf81ff000, PROBES_T32_PLDI), /* PLD{W} (immediate) 1111 1000 10x1 xxxx 1111 xxxx xxxx xxxx */ DECODE_OR (0xffd0f000, 0xf890f000), /* PLD{W} (immediate) 1111 1000 00x1 xxxx 1111 1100 xxxx xxxx */ DECODE_OR (0xffd0ff00, 0xf810fc00), /* PLI (immediate) 1111 1001 1001 xxxx 1111 xxxx xxxx xxxx */ DECODE_OR (0xfff0f000, 0xf990f000), /* PLI (immediate) 1111 1001 0001 xxxx 1111 1100 xxxx xxxx */ DECODE_SIMULATEX(0xfff0ff00, 0xf910fc00, PROBES_T32_PLDI, REGS(NOPCX, 0, 0, 0, 0)), /* PLD{W} (register) 1111 1000 00x1 xxxx 1111 0000 00xx xxxx */ DECODE_OR (0xffd0ffc0, 0xf810f000), /* PLI (register) 1111 1001 0001 xxxx 1111 0000 00xx xxxx */ DECODE_SIMULATEX(0xfff0ffc0, 0xf910f000, PROBES_T32_PLDI, REGS(NOPCX, 0, 0, 0, NOSPPC)), /* Other unallocated instructions... */ DECODE_END }; static const union decode_item t32_table_1111_100x[] = { /* Store/Load single data item */ /* ??? 1111 100x x11x xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xfe600000, 0xf8600000), /* ??? 
1111 1001 0101 xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xfff00000, 0xf9500000), /* ??? 1111 100x 0xxx xxxx xxxx 10x0 xxxx xxxx */ DECODE_REJECT (0xfe800d00, 0xf8000800), /* STRBT 1111 1000 0000 xxxx xxxx 1110 xxxx xxxx */ /* STRHT 1111 1000 0010 xxxx xxxx 1110 xxxx xxxx */ /* STRT 1111 1000 0100 xxxx xxxx 1110 xxxx xxxx */ /* LDRBT 1111 1000 0001 xxxx xxxx 1110 xxxx xxxx */ /* LDRSBT 1111 1001 0001 xxxx xxxx 1110 xxxx xxxx */ /* LDRHT 1111 1000 0011 xxxx xxxx 1110 xxxx xxxx */ /* LDRSHT 1111 1001 0011 xxxx xxxx 1110 xxxx xxxx */ /* LDRT 1111 1000 0101 xxxx xxxx 1110 xxxx xxxx */ DECODE_REJECT (0xfe800f00, 0xf8000e00), /* STR{,B,H} Rn,[PC...] 1111 1000 xxx0 1111 xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xff1f0000, 0xf80f0000), /* STR{,B,H} PC,[Rn...] 1111 1000 xxx0 xxxx 1111 xxxx xxxx xxxx */ DECODE_REJECT (0xff10f000, 0xf800f000), /* LDR (literal) 1111 1000 x101 1111 xxxx xxxx xxxx xxxx */ DECODE_SIMULATEX(0xff7f0000, 0xf85f0000, PROBES_T32_LDR_LIT, REGS(PC, ANY, 0, 0, 0)), /* STR (immediate) 1111 1000 0100 xxxx xxxx 1xxx xxxx xxxx */ /* LDR (immediate) 1111 1000 0101 xxxx xxxx 1xxx xxxx xxxx */ DECODE_OR (0xffe00800, 0xf8400800), /* STR (immediate) 1111 1000 1100 xxxx xxxx xxxx xxxx xxxx */ /* LDR (immediate) 1111 1000 1101 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xffe00000, 0xf8c00000, PROBES_T32_LDRSTR, REGS(NOPCX, ANY, 0, 0, 0)), /* STR (register) 1111 1000 0100 xxxx xxxx 0000 00xx xxxx */ /* LDR (register) 1111 1000 0101 xxxx xxxx 0000 00xx xxxx */ DECODE_EMULATEX (0xffe00fc0, 0xf8400000, PROBES_T32_LDRSTR, REGS(NOPCX, ANY, 0, 0, NOSPPC)), /* LDRB (literal) 1111 1000 x001 1111 xxxx xxxx xxxx xxxx */ /* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */ /* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */ /* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */ DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, PROBES_T32_LDR_LIT, REGS(PC, NOSPPCX, 0, 0, 0)), /* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */ /* STRH (immediate) 1111 
1000 0010 xxxx xxxx 1xxx xxxx xxxx */ /* LDRB (immediate) 1111 1000 0001 xxxx xxxx 1xxx xxxx xxxx */ /* LDRSB (immediate) 1111 1001 0001 xxxx xxxx 1xxx xxxx xxxx */ /* LDRH (immediate) 1111 1000 0011 xxxx xxxx 1xxx xxxx xxxx */ /* LDRSH (immediate) 1111 1001 0011 xxxx xxxx 1xxx xxxx xxxx */ DECODE_OR (0xfec00800, 0xf8000800), /* STRB (immediate) 1111 1000 1000 xxxx xxxx xxxx xxxx xxxx */ /* STRH (immediate) 1111 1000 1010 xxxx xxxx xxxx xxxx xxxx */ /* LDRB (immediate) 1111 1000 1001 xxxx xxxx xxxx xxxx xxxx */ /* LDRSB (immediate) 1111 1001 1001 xxxx xxxx xxxx xxxx xxxx */ /* LDRH (immediate) 1111 1000 1011 xxxx xxxx xxxx xxxx xxxx */ /* LDRSH (immediate) 1111 1001 1011 xxxx xxxx xxxx xxxx xxxx */ DECODE_EMULATEX (0xfec00000, 0xf8800000, PROBES_T32_LDRSTR, REGS(NOPCX, NOSPPCX, 0, 0, 0)), /* STRB (register) 1111 1000 0000 xxxx xxxx 0000 00xx xxxx */ /* STRH (register) 1111 1000 0010 xxxx xxxx 0000 00xx xxxx */ /* LDRB (register) 1111 1000 0001 xxxx xxxx 0000 00xx xxxx */ /* LDRSB (register) 1111 1001 0001 xxxx xxxx 0000 00xx xxxx */ /* LDRH (register) 1111 1000 0011 xxxx xxxx 0000 00xx xxxx */ /* LDRSH (register) 1111 1001 0011 xxxx xxxx 0000 00xx xxxx */ DECODE_EMULATEX (0xfe800fc0, 0xf8000000, PROBES_T32_LDRSTR, REGS(NOPCX, NOSPPCX, 0, 0, NOSPPC)), /* Other unallocated instructions... */ DECODE_END }; static const union decode_item t32_table_1111_1010___1111[] = { /* Data-processing (register) */ /* ??? 1111 1010 011x xxxx 1111 xxxx 1xxx xxxx */ DECODE_REJECT (0xffe0f080, 0xfa60f080), /* SXTH 1111 1010 0000 1111 1111 xxxx 1xxx xxxx */ /* UXTH 1111 1010 0001 1111 1111 xxxx 1xxx xxxx */ /* SXTB16 1111 1010 0010 1111 1111 xxxx 1xxx xxxx */ /* UXTB16 1111 1010 0011 1111 1111 xxxx 1xxx xxxx */ /* SXTB 1111 1010 0100 1111 1111 xxxx 1xxx xxxx */ /* UXTB 1111 1010 0101 1111 1111 xxxx 1xxx xxxx */ DECODE_EMULATEX (0xff8ff080, 0xfa0ff080, PROBES_T32_SIGN_EXTEND, REGS(0, 0, NOSPPC, 0, NOSPPC)), /* ??? 
1111 1010 1xxx xxxx 1111 xxxx 0x11 xxxx */ DECODE_REJECT (0xff80f0b0, 0xfa80f030), /* ??? 1111 1010 1x11 xxxx 1111 xxxx 0xxx xxxx */ DECODE_REJECT (0xffb0f080, 0xfab0f000), /* SADD16 1111 1010 1001 xxxx 1111 xxxx 0000 xxxx */ /* SASX 1111 1010 1010 xxxx 1111 xxxx 0000 xxxx */ /* SSAX 1111 1010 1110 xxxx 1111 xxxx 0000 xxxx */ /* SSUB16 1111 1010 1101 xxxx 1111 xxxx 0000 xxxx */ /* SADD8 1111 1010 1000 xxxx 1111 xxxx 0000 xxxx */ /* SSUB8 1111 1010 1100 xxxx 1111 xxxx 0000 xxxx */ /* QADD16 1111 1010 1001 xxxx 1111 xxxx 0001 xxxx */ /* QASX 1111 1010 1010 xxxx 1111 xxxx 0001 xxxx */ /* QSAX 1111 1010 1110 xxxx 1111 xxxx 0001 xxxx */ /* QSUB16 1111 1010 1101 xxxx 1111 xxxx 0001 xxxx */ /* QADD8 1111 1010 1000 xxxx 1111 xxxx 0001 xxxx */ /* QSUB8 1111 1010 1100 xxxx 1111 xxxx 0001 xxxx */ /* SHADD16 1111 1010 1001 xxxx 1111 xxxx 0010 xxxx */ /* SHASX 1111 1010 1010 xxxx 1111 xxxx 0010 xxxx */ /* SHSAX 1111 1010 1110 xxxx 1111 xxxx 0010 xxxx */ /* SHSUB16 1111 1010 1101 xxxx 1111 xxxx 0010 xxxx */ /* SHADD8 1111 1010 1000 xxxx 1111 xxxx 0010 xxxx */ /* SHSUB8 1111 1010 1100 xxxx 1111 xxxx 0010 xxxx */ /* UADD16 1111 1010 1001 xxxx 1111 xxxx 0100 xxxx */ /* UASX 1111 1010 1010 xxxx 1111 xxxx 0100 xxxx */ /* USAX 1111 1010 1110 xxxx 1111 xxxx 0100 xxxx */ /* USUB16 1111 1010 1101 xxxx 1111 xxxx 0100 xxxx */ /* UADD8 1111 1010 1000 xxxx 1111 xxxx 0100 xxxx */ /* USUB8 1111 1010 1100 xxxx 1111 xxxx 0100 xxxx */ /* UQADD16 1111 1010 1001 xxxx 1111 xxxx 0101 xxxx */ /* UQASX 1111 1010 1010 xxxx 1111 xxxx 0101 xxxx */ /* UQSAX 1111 1010 1110 xxxx 1111 xxxx 0101 xxxx */ /* UQSUB16 1111 1010 1101 xxxx 1111 xxxx 0101 xxxx */ /* UQADD8 1111 1010 1000 xxxx 1111 xxxx 0101 xxxx */ /* UQSUB8 1111 1010 1100 xxxx 1111 xxxx 0101 xxxx */ /* UHADD16 1111 1010 1001 xxxx 1111 xxxx 0110 xxxx */ /* UHASX 1111 1010 1010 xxxx 1111 xxxx 0110 xxxx */ /* UHSAX 1111 1010 1110 xxxx 1111 xxxx 0110 xxxx */ /* UHSUB16 1111 1010 1101 xxxx 1111 xxxx 0110 xxxx */ /* UHADD8 1111 1010 1000 xxxx 1111 xxxx 
0110 xxxx */ /* UHSUB8 1111 1010 1100 xxxx 1111 xxxx 0110 xxxx */ DECODE_OR (0xff80f080, 0xfa80f000), /* SXTAH 1111 1010 0000 xxxx 1111 xxxx 1xxx xxxx */ /* UXTAH 1111 1010 0001 xxxx 1111 xxxx 1xxx xxxx */ /* SXTAB16 1111 1010 0010 xxxx 1111 xxxx 1xxx xxxx */ /* UXTAB16 1111 1010 0011 xxxx 1111 xxxx 1xxx xxxx */ /* SXTAB 1111 1010 0100 xxxx 1111 xxxx 1xxx xxxx */ /* UXTAB 1111 1010 0101 xxxx 1111 xxxx 1xxx xxxx */ DECODE_OR (0xff80f080, 0xfa00f080), /* QADD 1111 1010 1000 xxxx 1111 xxxx 1000 xxxx */ /* QDADD 1111 1010 1000 xxxx 1111 xxxx 1001 xxxx */ /* QSUB 1111 1010 1000 xxxx 1111 xxxx 1010 xxxx */ /* QDSUB 1111 1010 1000 xxxx 1111 xxxx 1011 xxxx */ DECODE_OR (0xfff0f0c0, 0xfa80f080), /* SEL 1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */ DECODE_OR (0xfff0f0f0, 0xfaa0f080), /* LSL 1111 1010 000x xxxx 1111 xxxx 0000 xxxx */ /* LSR 1111 1010 001x xxxx 1111 xxxx 0000 xxxx */ /* ASR 1111 1010 010x xxxx 1111 xxxx 0000 xxxx */ /* ROR 1111 1010 011x xxxx 1111 xxxx 0000 xxxx */ DECODE_EMULATEX (0xff80f0f0, 0xfa00f000, PROBES_T32_MEDIA, REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)), /* CLZ 1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */ DECODE_OR (0xfff0f0f0, 0xfab0f080), /* REV 1111 1010 1001 xxxx 1111 xxxx 1000 xxxx */ /* REV16 1111 1010 1001 xxxx 1111 xxxx 1001 xxxx */ /* RBIT 1111 1010 1001 xxxx 1111 xxxx 1010 xxxx */ /* REVSH 1111 1010 1001 xxxx 1111 xxxx 1011 xxxx */ DECODE_EMULATEX (0xfff0f0c0, 0xfa90f080, PROBES_T32_REVERSE, REGS(NOSPPC, 0, NOSPPC, 0, SAMEAS16)), /* Other unallocated instructions... */ DECODE_END }; static const union decode_item t32_table_1111_1011_0[] = { /* Multiply, multiply accumulate, and absolute difference */ /* ??? 1111 1011 0000 xxxx 1111 xxxx 0001 xxxx */ DECODE_REJECT (0xfff0f0f0, 0xfb00f010), /* ??? 
1111 1011 0111 xxxx 1111 xxxx 0001 xxxx */ DECODE_REJECT (0xfff0f0f0, 0xfb70f010), /* SMULxy 1111 1011 0001 xxxx 1111 xxxx 00xx xxxx */ DECODE_OR (0xfff0f0c0, 0xfb10f000), /* MUL 1111 1011 0000 xxxx 1111 xxxx 0000 xxxx */ /* SMUAD{X} 1111 1011 0010 xxxx 1111 xxxx 000x xxxx */ /* SMULWy 1111 1011 0011 xxxx 1111 xxxx 000x xxxx */ /* SMUSD{X} 1111 1011 0100 xxxx 1111 xxxx 000x xxxx */ /* SMMUL{R} 1111 1011 0101 xxxx 1111 xxxx 000x xxxx */ /* USAD8 1111 1011 0111 xxxx 1111 xxxx 0000 xxxx */ DECODE_EMULATEX (0xff80f0e0, 0xfb00f000, PROBES_T32_MUL_ADD, REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)), /* ??? 1111 1011 0111 xxxx xxxx xxxx 0001 xxxx */ DECODE_REJECT (0xfff000f0, 0xfb700010), /* SMLAxy 1111 1011 0001 xxxx xxxx xxxx 00xx xxxx */ DECODE_OR (0xfff000c0, 0xfb100000), /* MLA 1111 1011 0000 xxxx xxxx xxxx 0000 xxxx */ /* MLS 1111 1011 0000 xxxx xxxx xxxx 0001 xxxx */ /* SMLAD{X} 1111 1011 0010 xxxx xxxx xxxx 000x xxxx */ /* SMLAWy 1111 1011 0011 xxxx xxxx xxxx 000x xxxx */ /* SMLSD{X} 1111 1011 0100 xxxx xxxx xxxx 000x xxxx */ /* SMMLA{R} 1111 1011 0101 xxxx xxxx xxxx 000x xxxx */ /* SMMLS{R} 1111 1011 0110 xxxx xxxx xxxx 000x xxxx */ /* USADA8 1111 1011 0111 xxxx xxxx xxxx 0000 xxxx */ DECODE_EMULATEX (0xff8000c0, 0xfb000000, PROBES_T32_MUL_ADD2, REGS(NOSPPC, NOSPPCX, NOSPPC, 0, NOSPPC)), /* Other unallocated instructions... 
*/ DECODE_END }; static const union decode_item t32_table_1111_1011_1[] = { /* Long multiply, long multiply accumulate, and divide */ /* UMAAL 1111 1011 1110 xxxx xxxx xxxx 0110 xxxx */ DECODE_OR (0xfff000f0, 0xfbe00060), /* SMLALxy 1111 1011 1100 xxxx xxxx xxxx 10xx xxxx */ DECODE_OR (0xfff000c0, 0xfbc00080), /* SMLALD{X} 1111 1011 1100 xxxx xxxx xxxx 110x xxxx */ /* SMLSLD{X} 1111 1011 1101 xxxx xxxx xxxx 110x xxxx */ DECODE_OR (0xffe000e0, 0xfbc000c0), /* SMULL 1111 1011 1000 xxxx xxxx xxxx 0000 xxxx */ /* UMULL 1111 1011 1010 xxxx xxxx xxxx 0000 xxxx */ /* SMLAL 1111 1011 1100 xxxx xxxx xxxx 0000 xxxx */ /* UMLAL 1111 1011 1110 xxxx xxxx xxxx 0000 xxxx */ DECODE_EMULATEX (0xff9000f0, 0xfb800000, PROBES_T32_MUL_ADD_LONG, REGS(NOSPPC, NOSPPC, NOSPPC, 0, NOSPPC)), /* SDIV 1111 1011 1001 xxxx xxxx xxxx 1111 xxxx */ /* UDIV 1111 1011 1011 xxxx xxxx xxxx 1111 xxxx */ /* Other unallocated instructions... */ DECODE_END }; const union decode_item probes_decode_thumb32_table[] = { /* * Load/store multiple instructions * 1110 100x x0xx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0xfe400000, 0xe8000000, t32_table_1110_100x_x0xx), /* * Load/store dual, load/store exclusive, table branch * 1110 100x x1xx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0xfe400000, 0xe8400000, t32_table_1110_100x_x1xx), /* * Data-processing (shifted register) * 1110 101x xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0xfe000000, 0xea000000, t32_table_1110_101x), /* * Coprocessor instructions * 1110 11xx xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xfc000000, 0xec000000), /* * Data-processing (modified immediate) * 1111 0x0x xxxx xxxx 0xxx xxxx xxxx xxxx */ DECODE_TABLE (0xfa008000, 0xf0000000, t32_table_1111_0x0x___0), /* * Data-processing (plain binary immediate) * 1111 0x1x xxxx xxxx 0xxx xxxx xxxx xxxx */ DECODE_TABLE (0xfa008000, 0xf2000000, t32_table_1111_0x1x___0), /* * Branches and miscellaneous control * 1111 0xxx xxxx xxxx 1xxx xxxx xxxx xxxx */ DECODE_TABLE (0xf8008000, 0xf0008000, 
t32_table_1111_0xxx___1), /* * Advanced SIMD element or structure load/store instructions * 1111 1001 xxx0 xxxx xxxx xxxx xxxx xxxx */ DECODE_REJECT (0xff100000, 0xf9000000), /* * Memory hints * 1111 100x x0x1 xxxx 1111 xxxx xxxx xxxx */ DECODE_TABLE (0xfe50f000, 0xf810f000, t32_table_1111_100x_x0x1__1111), /* * Store single data item * 1111 1000 xxx0 xxxx xxxx xxxx xxxx xxxx * Load single data items * 1111 100x xxx1 xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0xfe000000, 0xf8000000, t32_table_1111_100x), /* * Data-processing (register) * 1111 1010 xxxx xxxx 1111 xxxx xxxx xxxx */ DECODE_TABLE (0xff00f000, 0xfa00f000, t32_table_1111_1010___1111), /* * Multiply, multiply accumulate, and absolute difference * 1111 1011 0xxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0xff800000, 0xfb000000, t32_table_1111_1011_0), /* * Long multiply, long multiply accumulate, and divide * 1111 1011 1xxx xxxx xxxx xxxx xxxx xxxx */ DECODE_TABLE (0xff800000, 0xfb800000, t32_table_1111_1011_1), /* * Coprocessor instructions * 1111 11xx xxxx xxxx xxxx xxxx xxxx xxxx */ DECODE_END }; #ifdef CONFIG_ARM_KPROBES_TEST_MODULE EXPORT_SYMBOL_GPL(probes_decode_thumb32_table); #endif static const union decode_item t16_table_1011[] = { /* Miscellaneous 16-bit instructions */ /* ADD (SP plus immediate) 1011 0000 0xxx xxxx */ /* SUB (SP minus immediate) 1011 0000 1xxx xxxx */ DECODE_SIMULATE (0xff00, 0xb000, PROBES_T16_ADD_SP), /* CBZ 1011 00x1 xxxx xxxx */ /* CBNZ 1011 10x1 xxxx xxxx */ DECODE_SIMULATE (0xf500, 0xb100, PROBES_T16_CBZ), /* SXTH 1011 0010 00xx xxxx */ /* SXTB 1011 0010 01xx xxxx */ /* UXTH 1011 0010 10xx xxxx */ /* UXTB 1011 0010 11xx xxxx */ /* REV 1011 1010 00xx xxxx */ /* REV16 1011 1010 01xx xxxx */ /* ??? 
1011 1010 10xx xxxx */ /* REVSH 1011 1010 11xx xxxx */ DECODE_REJECT (0xffc0, 0xba80), DECODE_EMULATE (0xf500, 0xb000, PROBES_T16_SIGN_EXTEND), /* PUSH 1011 010x xxxx xxxx */ DECODE_CUSTOM (0xfe00, 0xb400, PROBES_T16_PUSH), /* POP 1011 110x xxxx xxxx */ DECODE_CUSTOM (0xfe00, 0xbc00, PROBES_T16_POP), /* * If-Then, and hints * 1011 1111 xxxx xxxx */ /* YIELD 1011 1111 0001 0000 */ DECODE_OR (0xffff, 0xbf10), /* SEV 1011 1111 0100 0000 */ DECODE_EMULATE (0xffff, 0xbf40, PROBES_T16_SEV), /* NOP 1011 1111 0000 0000 */ /* WFE 1011 1111 0010 0000 */ /* WFI 1011 1111 0011 0000 */ DECODE_SIMULATE (0xffcf, 0xbf00, PROBES_T16_WFE), /* Unassigned hints 1011 1111 xxxx 0000 */ DECODE_REJECT (0xff0f, 0xbf00), /* IT 1011 1111 xxxx xxxx */ DECODE_CUSTOM (0xff00, 0xbf00, PROBES_T16_IT), /* SETEND 1011 0110 010x xxxx */ /* CPS 1011 0110 011x xxxx */ /* BKPT 1011 1110 xxxx xxxx */ /* And unallocated instructions... */ DECODE_END }; const union decode_item probes_decode_thumb16_table[] = { /* * Shift (immediate), add, subtract, move, and compare * 00xx xxxx xxxx xxxx */ /* CMP (immediate) 0010 1xxx xxxx xxxx */ DECODE_EMULATE (0xf800, 0x2800, PROBES_T16_CMP), /* ADD (register) 0001 100x xxxx xxxx */ /* SUB (register) 0001 101x xxxx xxxx */ /* LSL (immediate) 0000 0xxx xxxx xxxx */ /* LSR (immediate) 0000 1xxx xxxx xxxx */ /* ASR (immediate) 0001 0xxx xxxx xxxx */ /* ADD (immediate, Thumb) 0001 110x xxxx xxxx */ /* SUB (immediate, Thumb) 0001 111x xxxx xxxx */ /* MOV (immediate) 0010 0xxx xxxx xxxx */ /* ADD (immediate, Thumb) 0011 0xxx xxxx xxxx */ /* SUB (immediate, Thumb) 0011 1xxx xxxx xxxx */ DECODE_EMULATE (0xc000, 0x0000, PROBES_T16_ADDSUB), /* * 16-bit Thumb data-processing instructions * 0100 00xx xxxx xxxx */ /* TST (register) 0100 0010 00xx xxxx */ DECODE_EMULATE (0xffc0, 0x4200, PROBES_T16_CMP), /* CMP (register) 0100 0010 10xx xxxx */ /* CMN (register) 0100 0010 11xx xxxx */ DECODE_EMULATE (0xff80, 0x4280, PROBES_T16_CMP), /* AND (register) 0100 0000 00xx xxxx */ /* EOR 
(register) 0100 0000 01xx xxxx */ /* LSL (register) 0100 0000 10xx xxxx */ /* LSR (register) 0100 0000 11xx xxxx */ /* ASR (register) 0100 0001 00xx xxxx */ /* ADC (register) 0100 0001 01xx xxxx */ /* SBC (register) 0100 0001 10xx xxxx */ /* ROR (register) 0100 0001 11xx xxxx */ /* RSB (immediate) 0100 0010 01xx xxxx */ /* ORR (register) 0100 0011 00xx xxxx */ /* MUL 0100 0011 00xx xxxx */ /* BIC (register) 0100 0011 10xx xxxx */ /* MVN (register) 0100 0011 10xx xxxx */ DECODE_EMULATE (0xfc00, 0x4000, PROBES_T16_LOGICAL), /* * Special data instructions and branch and exchange * 0100 01xx xxxx xxxx */ /* BLX pc 0100 0111 1111 1xxx */ DECODE_REJECT (0xfff8, 0x47f8), /* BX (register) 0100 0111 0xxx xxxx */ /* BLX (register) 0100 0111 1xxx xxxx */ DECODE_SIMULATE (0xff00, 0x4700, PROBES_T16_BLX), /* ADD pc, pc 0100 0100 1111 1111 */ DECODE_REJECT (0xffff, 0x44ff), /* ADD (register) 0100 0100 xxxx xxxx */ /* CMP (register) 0100 0101 xxxx xxxx */ /* MOV (register) 0100 0110 xxxx xxxx */ DECODE_CUSTOM (0xfc00, 0x4400, PROBES_T16_HIREGOPS), /* * Load from Literal Pool * LDR (literal) 0100 1xxx xxxx xxxx */ DECODE_SIMULATE (0xf800, 0x4800, PROBES_T16_LDR_LIT), /* * 16-bit Thumb Load/store instructions * 0101 xxxx xxxx xxxx * 011x xxxx xxxx xxxx * 100x xxxx xxxx xxxx */ /* STR (register) 0101 000x xxxx xxxx */ /* STRH (register) 0101 001x xxxx xxxx */ /* STRB (register) 0101 010x xxxx xxxx */ /* LDRSB (register) 0101 011x xxxx xxxx */ /* LDR (register) 0101 100x xxxx xxxx */ /* LDRH (register) 0101 101x xxxx xxxx */ /* LDRB (register) 0101 110x xxxx xxxx */ /* LDRSH (register) 0101 111x xxxx xxxx */ /* STR (immediate, Thumb) 0110 0xxx xxxx xxxx */ /* LDR (immediate, Thumb) 0110 1xxx xxxx xxxx */ /* STRB (immediate, Thumb) 0111 0xxx xxxx xxxx */ /* LDRB (immediate, Thumb) 0111 1xxx xxxx xxxx */ DECODE_EMULATE (0xc000, 0x4000, PROBES_T16_LDRHSTRH), /* STRH (immediate, Thumb) 1000 0xxx xxxx xxxx */ /* LDRH (immediate, Thumb) 1000 1xxx xxxx xxxx */ DECODE_EMULATE (0xf000, 
0x8000, PROBES_T16_LDRHSTRH), /* STR (immediate, Thumb) 1001 0xxx xxxx xxxx */ /* LDR (immediate, Thumb) 1001 1xxx xxxx xxxx */ DECODE_SIMULATE (0xf000, 0x9000, PROBES_T16_LDRSTR), /* * Generate PC-/SP-relative address * ADR (literal) 1010 0xxx xxxx xxxx * ADD (SP plus immediate) 1010 1xxx xxxx xxxx */ DECODE_SIMULATE (0xf000, 0xa000, PROBES_T16_ADR), /* * Miscellaneous 16-bit instructions * 1011 xxxx xxxx xxxx */ DECODE_TABLE (0xf000, 0xb000, t16_table_1011), /* STM 1100 0xxx xxxx xxxx */ /* LDM 1100 1xxx xxxx xxxx */ DECODE_EMULATE (0xf000, 0xc000, PROBES_T16_LDMSTM), /* * Conditional branch, and Supervisor Call */ /* Permanently UNDEFINED 1101 1110 xxxx xxxx */ /* SVC 1101 1111 xxxx xxxx */ DECODE_REJECT (0xfe00, 0xde00), /* Conditional branch 1101 xxxx xxxx xxxx */ DECODE_CUSTOM (0xf000, 0xd000, PROBES_T16_BRANCH_COND), /* * Unconditional branch * B 1110 0xxx xxxx xxxx */ DECODE_SIMULATE (0xf800, 0xe000, PROBES_T16_BRANCH), DECODE_END }; #ifdef CONFIG_ARM_KPROBES_TEST_MODULE EXPORT_SYMBOL_GPL(probes_decode_thumb16_table); #endif static unsigned long __kprobes thumb_check_cc(unsigned long cpsr) { if (unlikely(in_it_block(cpsr))) return probes_condition_checks[current_cond(cpsr)](cpsr); return true; } static void __kprobes thumb16_singlestep(probes_opcode_t opcode, struct arch_probes_insn *asi, struct pt_regs *regs) { regs->ARM_pc += 2; asi->insn_handler(opcode, asi, regs); regs->ARM_cpsr = it_advance(regs->ARM_cpsr); } static void __kprobes thumb32_singlestep(probes_opcode_t opcode, struct arch_probes_insn *asi, struct pt_regs *regs) { regs->ARM_pc += 4; asi->insn_handler(opcode, asi, regs); regs->ARM_cpsr = it_advance(regs->ARM_cpsr); } enum probes_insn __kprobes thumb16_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi, bool emulate, const union decode_action *actions) { asi->insn_singlestep = thumb16_singlestep; asi->insn_check_cc = thumb_check_cc; return probes_decode_insn(insn, asi, probes_decode_thumb16_table, true, emulate, actions); } 
/*
 * Decode one 32-bit Thumb instruction for probing.
 *
 * Installs the Thumb-32 single-step and condition-check callbacks on
 * @asi, then defers the actual decoding to the generic decoder driven
 * by the 32-bit Thumb decode table.
 */
enum probes_insn __kprobes
thumb32_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
			   bool emulate, const union decode_action *actions)
{
	/* Hook up the Thumb-32 execution callbacks before decoding. */
	asi->insn_check_cc = thumb_check_cc;
	asi->insn_singlestep = thumb32_singlestep;

	/* Generic decoder walks probes_decode_thumb32_table for us. */
	return probes_decode_insn(insn, asi, probes_decode_thumb32_table,
				  true, emulate, actions);
}
gpl-2.0
pritanshchandra/purex_kernel_xolo_black
net/ipv4/tcp_metrics.c
1626
28137
#include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/cache.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/tcp.h> #include <linux/hash.h> #include <linux/tcp_metrics.h> #include <linux/vmalloc.h> #include <net/inet_connection_sock.h> #include <net/net_namespace.h> #include <net/request_sock.h> #include <net/inetpeer.h> #include <net/sock.h> #include <net/ipv6.h> #include <net/dst.h> #include <net/tcp.h> #include <net/genetlink.h> int sysctl_tcp_nometrics_save __read_mostly; static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr, struct net *net, unsigned int hash); struct tcp_fastopen_metrics { u16 mss; u16 syn_loss:10; /* Recurring Fast Open SYN losses */ unsigned long last_syn_loss; /* Last Fast Open SYN loss */ struct tcp_fastopen_cookie cookie; }; struct tcp_metrics_block { struct tcp_metrics_block __rcu *tcpm_next; struct inetpeer_addr tcpm_addr; unsigned long tcpm_stamp; u32 tcpm_ts; u32 tcpm_ts_stamp; u32 tcpm_lock; u32 tcpm_vals[TCP_METRIC_MAX + 1]; struct tcp_fastopen_metrics tcpm_fastopen; struct rcu_head rcu_head; }; static bool tcp_metric_locked(struct tcp_metrics_block *tm, enum tcp_metric_index idx) { return tm->tcpm_lock & (1 << idx); } static u32 tcp_metric_get(struct tcp_metrics_block *tm, enum tcp_metric_index idx) { return tm->tcpm_vals[idx]; } static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm, enum tcp_metric_index idx) { return msecs_to_jiffies(tm->tcpm_vals[idx]); } static void tcp_metric_set(struct tcp_metrics_block *tm, enum tcp_metric_index idx, u32 val) { tm->tcpm_vals[idx] = val; } static void tcp_metric_set_msecs(struct tcp_metrics_block *tm, enum tcp_metric_index idx, u32 val) { tm->tcpm_vals[idx] = jiffies_to_msecs(val); } static bool addr_same(const struct inetpeer_addr *a, const struct inetpeer_addr *b) { const struct in6_addr *a6, *b6; if (a->family != b->family) return false; if (a->family == AF_INET) 
return a->addr.a4 == b->addr.a4; a6 = (const struct in6_addr *) &a->addr.a6[0]; b6 = (const struct in6_addr *) &b->addr.a6[0]; return ipv6_addr_equal(a6, b6); } struct tcpm_hash_bucket { struct tcp_metrics_block __rcu *chain; }; static DEFINE_SPINLOCK(tcp_metrics_lock); static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst, bool fastopen_clear) { u32 val; tm->tcpm_stamp = jiffies; val = 0; if (dst_metric_locked(dst, RTAX_RTT)) val |= 1 << TCP_METRIC_RTT; if (dst_metric_locked(dst, RTAX_RTTVAR)) val |= 1 << TCP_METRIC_RTTVAR; if (dst_metric_locked(dst, RTAX_SSTHRESH)) val |= 1 << TCP_METRIC_SSTHRESH; if (dst_metric_locked(dst, RTAX_CWND)) val |= 1 << TCP_METRIC_CWND; if (dst_metric_locked(dst, RTAX_REORDERING)) val |= 1 << TCP_METRIC_REORDERING; tm->tcpm_lock = val; tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT); tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR); tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH); tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND); tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING); tm->tcpm_ts = 0; tm->tcpm_ts_stamp = 0; if (fastopen_clear) { tm->tcpm_fastopen.mss = 0; tm->tcpm_fastopen.syn_loss = 0; tm->tcpm_fastopen.cookie.len = 0; } } #define TCP_METRICS_TIMEOUT (60 * 60 * HZ) static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst) { if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT))) tcpm_suck_dst(tm, dst, false); } #define TCP_METRICS_RECLAIM_DEPTH 5 #define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, struct inetpeer_addr *addr, unsigned int hash) { struct tcp_metrics_block *tm; struct net *net; bool reclaim = false; spin_lock_bh(&tcp_metrics_lock); net = dev_net(dst->dev); /* While waiting for the spin-lock the cache might have been populated * with this entry and so we have to 
check again. */ tm = __tcp_get_metrics(addr, net, hash); if (tm == TCP_METRICS_RECLAIM_PTR) { reclaim = true; tm = NULL; } if (tm) { tcpm_check_stamp(tm, dst); goto out_unlock; } if (unlikely(reclaim)) { struct tcp_metrics_block *oldest; oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); for (tm = rcu_dereference(oldest->tcpm_next); tm; tm = rcu_dereference(tm->tcpm_next)) { if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp)) oldest = tm; } tm = oldest; } else { tm = kmalloc(sizeof(*tm), GFP_ATOMIC); if (!tm) goto out_unlock; } tm->tcpm_addr = *addr; tcpm_suck_dst(tm, dst, true); if (likely(!reclaim)) { tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain; rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm); } out_unlock: spin_unlock_bh(&tcp_metrics_lock); return tm; } static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth) { if (tm) return tm; if (depth > TCP_METRICS_RECLAIM_DEPTH) return TCP_METRICS_RECLAIM_PTR; return NULL; } static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr, struct net *net, unsigned int hash) { struct tcp_metrics_block *tm; int depth = 0; for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_addr, addr)) break; depth++; } return tcp_get_encode(tm, depth); } static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req, struct dst_entry *dst) { struct tcp_metrics_block *tm; struct inetpeer_addr addr; unsigned int hash; struct net *net; addr.family = req->rsk_ops->family; switch (addr.family) { case AF_INET: addr.addr.a4 = inet_rsk(req)->rmt_addr; hash = (__force unsigned int) addr.addr.a4; break; case AF_INET6: *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr; hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr); break; default: return NULL; } net = dev_net(dst->dev); hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); for (tm = 
rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_addr, &addr)) break; } tcpm_check_stamp(tm, dst); return tm; } static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw) { struct inet6_timewait_sock *tw6; struct tcp_metrics_block *tm; struct inetpeer_addr addr; unsigned int hash; struct net *net; addr.family = tw->tw_family; switch (addr.family) { case AF_INET: addr.addr.a4 = tw->tw_daddr; hash = (__force unsigned int) addr.addr.a4; break; case AF_INET6: tw6 = inet6_twsk((struct sock *)tw); *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr; hash = ipv6_addr_hash(&tw6->tw_v6_daddr); break; default: return NULL; } net = twsk_net(tw); hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_addr, &addr)) break; } return tm; } static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk, struct dst_entry *dst, bool create) { struct tcp_metrics_block *tm; struct inetpeer_addr addr; unsigned int hash; struct net *net; addr.family = sk->sk_family; switch (addr.family) { case AF_INET: addr.addr.a4 = inet_sk(sk)->inet_daddr; hash = (__force unsigned int) addr.addr.a4; break; case AF_INET6: *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr; hash = ipv6_addr_hash(&inet6_sk(sk)->daddr); break; default: return NULL; } net = dev_net(dst->dev); hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); tm = __tcp_get_metrics(&addr, net, hash); if (tm == TCP_METRICS_RECLAIM_PTR) tm = NULL; if (!tm && create) tm = tcpm_new(dst, &addr, hash); else tcpm_check_stamp(tm, dst); return tm; } /* Save metrics learned by this TCP session. This function is called * only, when TCP finishes successfully i.e. when it enters TIME-WAIT * or goes from LAST-ACK to CLOSE. 
*/ void tcp_update_metrics(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct dst_entry *dst = __sk_dst_get(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_metrics_block *tm; unsigned long rtt; u32 val; int m; if (sysctl_tcp_nometrics_save || !dst) return; if (dst->flags & DST_HOST) dst_confirm(dst); rcu_read_lock(); if (icsk->icsk_backoff || !tp->srtt) { /* This session failed to estimate rtt. Why? * Probably, no packets returned in time. Reset our * results. */ tm = tcp_get_metrics(sk, dst, false); if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT)) tcp_metric_set(tm, TCP_METRIC_RTT, 0); goto out_unlock; } else tm = tcp_get_metrics(sk, dst, true); if (!tm) goto out_unlock; rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT); m = rtt - tp->srtt; /* If newly calculated rtt larger than stored one, store new * one. Otherwise, use EWMA. Remember, rtt overestimation is * always better than underestimation. */ if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) { if (m <= 0) rtt = tp->srtt; else rtt -= (m >> 3); tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt); } if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) { unsigned long var; if (m < 0) m = -m; /* Scale deviation to rttvar fixed point */ m >>= 1; if (m < tp->mdev) m = tp->mdev; var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR); if (m >= var) var = m; else var -= (var - m) >> 2; tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var); } if (tcp_in_initial_slowstart(tp)) { /* Slow start still did not finish. */ if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) { val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val && (tp->snd_cwnd >> 1) > val) tcp_metric_set(tm, TCP_METRIC_SSTHRESH, tp->snd_cwnd >> 1); } if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) { val = tcp_metric_get(tm, TCP_METRIC_CWND); if (tp->snd_cwnd > val) tcp_metric_set(tm, TCP_METRIC_CWND, tp->snd_cwnd); } } else if (tp->snd_cwnd > tp->snd_ssthresh && icsk->icsk_ca_state == TCP_CA_Open) { /* Cong. avoidance phase, cwnd is reliable. 
*/ if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) tcp_metric_set(tm, TCP_METRIC_SSTHRESH, max(tp->snd_cwnd >> 1, tp->snd_ssthresh)); if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) { val = tcp_metric_get(tm, TCP_METRIC_CWND); tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1); } } else { /* Else slow start did not finish, cwnd is non-sense, * ssthresh may be also invalid. */ if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) { val = tcp_metric_get(tm, TCP_METRIC_CWND); tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_ssthresh) >> 1); } if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) { val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val && tp->snd_ssthresh > val) tcp_metric_set(tm, TCP_METRIC_SSTHRESH, tp->snd_ssthresh); } if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) { val = tcp_metric_get(tm, TCP_METRIC_REORDERING); if (val < tp->reordering && tp->reordering != sysctl_tcp_reordering) tcp_metric_set(tm, TCP_METRIC_REORDERING, tp->reordering); } } tm->tcpm_stamp = jiffies; out_unlock: rcu_read_unlock(); } /* Initialize metrics on socket. */ void tcp_init_metrics(struct sock *sk) { struct dst_entry *dst = __sk_dst_get(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_metrics_block *tm; u32 val; if (dst == NULL) goto reset; dst_confirm(dst); rcu_read_lock(); tm = tcp_get_metrics(sk, dst, true); if (!tm) { rcu_read_unlock(); goto reset; } if (tcp_metric_locked(tm, TCP_METRIC_CWND)) tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND); val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val) { tp->snd_ssthresh = val; if (tp->snd_ssthresh > tp->snd_cwnd_clamp) tp->snd_ssthresh = tp->snd_cwnd_clamp; } else { /* ssthresh may have been reduced unnecessarily during. * 3WHS. Restore it back to its initial default. 
*/ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; } val = tcp_metric_get(tm, TCP_METRIC_REORDERING); if (val && tp->reordering != val) { tcp_disable_fack(tp); tcp_disable_early_retrans(tp); tp->reordering = val; } val = tcp_metric_get(tm, TCP_METRIC_RTT); if (val == 0 || tp->srtt == 0) { rcu_read_unlock(); goto reset; } /* Initial rtt is determined from SYN,SYN-ACK. * The segment is small and rtt may appear much * less than real one. Use per-dst memory * to make it more realistic. * * A bit of theory. RTT is time passed after "normal" sized packet * is sent until it is ACKed. In normal circumstances sending small * packets force peer to delay ACKs and calculation is correct too. * The algorithm is adaptive and, provided we follow specs, it * NEVER underestimate RTT. BUT! If peer tries to make some clever * tricks sort of "quick acks" for time long enough to decrease RTT * to low value, and then abruptly stops to do it and starts to delay * ACKs, wait for troubles. */ val = msecs_to_jiffies(val); if (val > tp->srtt) { tp->srtt = val; tp->rtt_seq = tp->snd_nxt; } val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR); if (val > tp->mdev) { tp->mdev = val; tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); } rcu_read_unlock(); tcp_set_rto(sk); reset: if (tp->srtt == 0) { /* RFC6298: 5.7 We've failed to get a valid RTT sample from * 3WHS. This is most likely due to retransmission, * including spurious one. Reset the RTO back to 3secs * from the more aggressive 1sec to avoid more spurious * retransmission. */ tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK; } /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been * retransmitted. In light of RFC6298 more aggressive 1sec * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK * retransmission has occurred. 
*/ if (tp->total_retrans > 1) tp->snd_cwnd = 1; else tp->snd_cwnd = tcp_init_cwnd(tp, dst); tp->snd_cwnd_stamp = tcp_time_stamp; } bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check) { struct tcp_metrics_block *tm; bool ret; if (!dst) return false; rcu_read_lock(); tm = __tcp_get_metrics_req(req, dst); if (paws_check) { if (tm && (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL && (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW) ret = false; else ret = true; } else { if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp) ret = true; else ret = false; } rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(tcp_peer_is_proven); void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst) { struct tcp_metrics_block *tm; rcu_read_lock(); tm = tcp_get_metrics(sk, dst, true); if (tm) { struct tcp_sock *tp = tcp_sk(sk); if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) { tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp; tp->rx_opt.ts_recent = tm->tcpm_ts; } } rcu_read_unlock(); } EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp); /* VJ's idea. Save last timestamp seen from this destination and hold * it at least for normal timewait interval to use for duplicate * segment detection in subsequent connections, before they enter * synchronized state. 
*/ bool tcp_remember_stamp(struct sock *sk) { struct dst_entry *dst = __sk_dst_get(sk); bool ret = false; if (dst) { struct tcp_metrics_block *tm; rcu_read_lock(); tm = tcp_get_metrics(sk, dst, true); if (tm) { struct tcp_sock *tp = tcp_sk(sk); if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 || ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL && tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) { tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp; tm->tcpm_ts = tp->rx_opt.ts_recent; } ret = true; } rcu_read_unlock(); } return ret; } bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw) { struct tcp_metrics_block *tm; bool ret = false; rcu_read_lock(); tm = __tcp_get_metrics_tw(tw); if (tm) { const struct tcp_timewait_sock *tcptw; struct sock *sk = (struct sock *) tw; tcptw = tcp_twsk(sk); if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 || ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL && tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) { tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp; tm->tcpm_ts = tcptw->tw_ts_recent; } ret = true; } rcu_read_unlock(); return ret; } static DEFINE_SEQLOCK(fastopen_seqlock); void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, struct tcp_fastopen_cookie *cookie, int *syn_loss, unsigned long *last_syn_loss) { struct tcp_metrics_block *tm; rcu_read_lock(); tm = tcp_get_metrics(sk, __sk_dst_get(sk), false); if (tm) { struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen; unsigned int seq; do { seq = read_seqbegin(&fastopen_seqlock); if (tfom->mss) *mss = tfom->mss; *cookie = tfom->cookie; *syn_loss = tfom->syn_loss; *last_syn_loss = *syn_loss ? 
tfom->last_syn_loss : 0; } while (read_seqretry(&fastopen_seqlock, seq)); } rcu_read_unlock(); } void tcp_fastopen_cache_set(struct sock *sk, u16 mss, struct tcp_fastopen_cookie *cookie, bool syn_lost) { struct dst_entry *dst = __sk_dst_get(sk); struct tcp_metrics_block *tm; if (!dst) return; rcu_read_lock(); tm = tcp_get_metrics(sk, dst, true); if (tm) { struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen; write_seqlock_bh(&fastopen_seqlock); tfom->mss = mss; if (cookie->len > 0) tfom->cookie = *cookie; if (syn_lost) { ++tfom->syn_loss; tfom->last_syn_loss = jiffies; } else tfom->syn_loss = 0; write_sequnlock_bh(&fastopen_seqlock); } rcu_read_unlock(); } static struct genl_family tcp_metrics_nl_family = { .id = GENL_ID_GENERATE, .hdrsize = 0, .name = TCP_METRICS_GENL_NAME, .version = TCP_METRICS_GENL_VERSION, .maxattr = TCP_METRICS_ATTR_MAX, .netnsok = true, }; static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = { [TCP_METRICS_ATTR_ADDR_IPV4] = { .type = NLA_U32, }, [TCP_METRICS_ATTR_ADDR_IPV6] = { .type = NLA_BINARY, .len = sizeof(struct in6_addr), }, /* Following attributes are not received for GET/DEL, * we keep them for reference */ #if 0 [TCP_METRICS_ATTR_AGE] = { .type = NLA_MSECS, }, [TCP_METRICS_ATTR_TW_TSVAL] = { .type = NLA_U32, }, [TCP_METRICS_ATTR_TW_TS_STAMP] = { .type = NLA_S32, }, [TCP_METRICS_ATTR_VALS] = { .type = NLA_NESTED, }, [TCP_METRICS_ATTR_FOPEN_MSS] = { .type = NLA_U16, }, [TCP_METRICS_ATTR_FOPEN_SYN_DROPS] = { .type = NLA_U16, }, [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS] = { .type = NLA_MSECS, }, [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY, .len = TCP_FASTOPEN_COOKIE_MAX, }, #endif }; /* Add attributes, caller cancels its header on failure */ static int tcp_metrics_fill_info(struct sk_buff *msg, struct tcp_metrics_block *tm) { struct nlattr *nest; int i; switch (tm->tcpm_addr.family) { case AF_INET: if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4, tm->tcpm_addr.addr.a4) < 0) goto nla_put_failure; break; 
case AF_INET6: if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16, tm->tcpm_addr.addr.a6) < 0) goto nla_put_failure; break; default: return -EAFNOSUPPORT; } if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE, jiffies - tm->tcpm_stamp) < 0) goto nla_put_failure; if (tm->tcpm_ts_stamp) { if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP, (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0) goto nla_put_failure; if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL, tm->tcpm_ts) < 0) goto nla_put_failure; } { int n = 0; nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS); if (!nest) goto nla_put_failure; for (i = 0; i < TCP_METRIC_MAX + 1; i++) { if (!tm->tcpm_vals[i]) continue; if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0) goto nla_put_failure; n++; } if (n) nla_nest_end(msg, nest); else nla_nest_cancel(msg, nest); } { struct tcp_fastopen_metrics tfom_copy[1], *tfom; unsigned int seq; do { seq = read_seqbegin(&fastopen_seqlock); tfom_copy[0] = tm->tcpm_fastopen; } while (read_seqretry(&fastopen_seqlock, seq)); tfom = tfom_copy; if (tfom->mss && nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS, tfom->mss) < 0) goto nla_put_failure; if (tfom->syn_loss && (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS, tfom->syn_loss) < 0 || nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS, jiffies - tfom->last_syn_loss) < 0)) goto nla_put_failure; if (tfom->cookie.len > 0 && nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE, tfom->cookie.len, tfom->cookie.val) < 0) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } static int tcp_metrics_dump_info(struct sk_buff *skb, struct netlink_callback *cb, struct tcp_metrics_block *tm) { void *hdr; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &tcp_metrics_nl_family, NLM_F_MULTI, TCP_METRICS_CMD_GET); if (!hdr) return -EMSGSIZE; if (tcp_metrics_fill_info(skb, tm) < 0) goto nla_put_failure; return genlmsg_end(skb, hdr); nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } static int tcp_metrics_nl_dump(struct 
sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log; unsigned int row, s_row = cb->args[0]; int s_col = cb->args[1], col = s_col; for (row = s_row; row < max_rows; row++, s_col = 0) { struct tcp_metrics_block *tm; struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row; rcu_read_lock(); for (col = 0, tm = rcu_dereference(hb->chain); tm; tm = rcu_dereference(tm->tcpm_next), col++) { if (col < s_col) continue; if (tcp_metrics_dump_info(skb, cb, tm) < 0) { rcu_read_unlock(); goto done; } } rcu_read_unlock(); } done: cb->args[0] = row; cb->args[1] = col; return skb->len; } static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr, unsigned int *hash, int optional) { struct nlattr *a; a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4]; if (a) { addr->family = AF_INET; addr->addr.a4 = nla_get_be32(a); *hash = (__force unsigned int) addr->addr.a4; return 0; } a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6]; if (a) { if (nla_len(a) != sizeof(struct in6_addr)) return -EINVAL; addr->family = AF_INET6; memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6)); *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6); return 0; } return optional ? 
1 : -EAFNOSUPPORT; } static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info) { struct tcp_metrics_block *tm; struct inetpeer_addr addr; unsigned int hash; struct sk_buff *msg; struct net *net = genl_info_net(info); void *reply; int ret; ret = parse_nl_addr(info, &addr, &hash, 0); if (ret < 0) return ret; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0, info->genlhdr->cmd); if (!reply) goto nla_put_failure; hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); ret = -ESRCH; rcu_read_lock(); for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_addr, &addr)) { ret = tcp_metrics_fill_info(msg, tm); break; } } rcu_read_unlock(); if (ret < 0) goto out_free; genlmsg_end(msg, reply); return genlmsg_reply(msg, info); nla_put_failure: ret = -EMSGSIZE; out_free: nlmsg_free(msg); return ret; } #define deref_locked_genl(p) \ rcu_dereference_protected(p, lockdep_genl_is_held() && \ lockdep_is_held(&tcp_metrics_lock)) #define deref_genl(p) rcu_dereference_protected(p, lockdep_genl_is_held()) static int tcp_metrics_flush_all(struct net *net) { unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log; struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash; struct tcp_metrics_block *tm; unsigned int row; for (row = 0; row < max_rows; row++, hb++) { spin_lock_bh(&tcp_metrics_lock); tm = deref_locked_genl(hb->chain); if (tm) hb->chain = NULL; spin_unlock_bh(&tcp_metrics_lock); while (tm) { struct tcp_metrics_block *next; next = deref_genl(tm->tcpm_next); kfree_rcu(tm, rcu_head); tm = next; } } return 0; } static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info) { struct tcpm_hash_bucket *hb; struct tcp_metrics_block *tm; struct tcp_metrics_block __rcu **pp; struct inetpeer_addr addr; unsigned int hash; struct net *net = genl_info_net(info); int ret; ret = 
parse_nl_addr(info, &addr, &hash, 1); if (ret < 0) return ret; if (ret > 0) return tcp_metrics_flush_all(net); hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log); hb = net->ipv4.tcp_metrics_hash + hash; pp = &hb->chain; spin_lock_bh(&tcp_metrics_lock); for (tm = deref_locked_genl(*pp); tm; pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) { if (addr_same(&tm->tcpm_addr, &addr)) { *pp = tm->tcpm_next; break; } } spin_unlock_bh(&tcp_metrics_lock); if (!tm) return -ESRCH; kfree_rcu(tm, rcu_head); return 0; } static struct genl_ops tcp_metrics_nl_ops[] = { { .cmd = TCP_METRICS_CMD_GET, .doit = tcp_metrics_nl_cmd_get, .dumpit = tcp_metrics_nl_dump, .policy = tcp_metrics_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = TCP_METRICS_CMD_DEL, .doit = tcp_metrics_nl_cmd_del, .policy = tcp_metrics_nl_policy, .flags = GENL_ADMIN_PERM, }, }; static unsigned int tcpmhash_entries; static int __init set_tcpmhash_entries(char *str) { ssize_t ret; if (!str) return 0; ret = kstrtouint(str, 0, &tcpmhash_entries); if (ret) return 0; return 1; } __setup("tcpmhash_entries=", set_tcpmhash_entries); static int __net_init tcp_net_metrics_init(struct net *net) { size_t size; unsigned int slots; slots = tcpmhash_entries; if (!slots) { if (totalram_pages >= 128 * 1024) slots = 16 * 1024; else slots = 8 * 1024; } net->ipv4.tcp_metrics_hash_log = order_base_2(slots); size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log; net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!net->ipv4.tcp_metrics_hash) net->ipv4.tcp_metrics_hash = vzalloc(size); if (!net->ipv4.tcp_metrics_hash) return -ENOMEM; return 0; } static void __net_exit tcp_net_metrics_exit(struct net *net) { unsigned int i; for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) { struct tcp_metrics_block *tm, *next; tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1); while (tm) { next = rcu_dereference_protected(tm->tcpm_next, 1); kfree(tm); tm = next; } } if 
(is_vmalloc_addr(net->ipv4.tcp_metrics_hash)) vfree(net->ipv4.tcp_metrics_hash); else kfree(net->ipv4.tcp_metrics_hash); } static __net_initdata struct pernet_operations tcp_net_metrics_ops = { .init = tcp_net_metrics_init, .exit = tcp_net_metrics_exit, }; void __init tcp_metrics_init(void) { int ret; ret = register_pernet_subsys(&tcp_net_metrics_ops); if (ret < 0) goto cleanup; ret = genl_register_family_with_ops(&tcp_metrics_nl_family, tcp_metrics_nl_ops, ARRAY_SIZE(tcp_metrics_nl_ops)); if (ret < 0) goto cleanup_subsys; return; cleanup_subsys: unregister_pernet_subsys(&tcp_net_metrics_ops); cleanup: return; }
gpl-2.0
Skin1980/sturgeon
drivers/acpi/acpica/exresnte.c
2138
8581
/******************************************************************************
 *
 * Module Name: exresnte - AML Interpreter object resolution
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2013, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exresnte")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_resolve_node_to_value
 *
 * PARAMETERS:  object_ptr      - Pointer to a location that contains
 *                                a pointer to a NS node, and will receive a
 *                                pointer to the resolved object.
 *              walk_state      - Current state. Valid only if executing AML
 *                                code. NULL if simply resolving an object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Resolve a Namespace node to a valued object
 *
 * Note: for some of the data types, the pointer attached to the Node
 * can be either a pointer to an actual internal object or a pointer into the
 * AML stream itself. These types are currently:
 *
 *      ACPI_TYPE_INTEGER
 *      ACPI_TYPE_STRING
 *      ACPI_TYPE_BUFFER
 *      ACPI_TYPE_MUTEX
 *      ACPI_TYPE_PACKAGE
 *
 ******************************************************************************/

acpi_status
acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
			      struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;
	union acpi_operand_object *source_desc;
	union acpi_operand_object *obj_desc = NULL;
	struct acpi_namespace_node *node;
	acpi_object_type entry_type;

	ACPI_FUNCTION_TRACE(ex_resolve_node_to_value);

	/*
	 * The stack pointer points to a struct acpi_namespace_node (Node). Get the
	 * object that is attached to the Node.
	 */
	node = *object_ptr;
	source_desc = acpi_ns_get_attached_object(node);
	entry_type = acpi_ns_get_type((acpi_handle) node);

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
			  node, source_desc,
			  acpi_ut_get_type_name(entry_type)));

	if ((entry_type == ACPI_TYPE_LOCAL_ALIAS) ||
	    (entry_type == ACPI_TYPE_LOCAL_METHOD_ALIAS)) {

		/* There is always exactly one level of indirection */

		/* Re-resolve through the alias target and report it back to
		 * the caller via *object_ptr */
		node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
		source_desc = acpi_ns_get_attached_object(node);
		entry_type = acpi_ns_get_type((acpi_handle) node);
		*object_ptr = node;
	}

	/*
	 * Several object types require no further processing:
	 * 1) Device/Thermal objects don't have a "real" subobject, return the Node
	 * 2) Method locals and arguments have a pseudo-Node
	 * 3) 10/2007: Added method type to assist with Package construction.
	 */
	if ((entry_type == ACPI_TYPE_DEVICE) ||
	    (entry_type == ACPI_TYPE_THERMAL) ||
	    (entry_type == ACPI_TYPE_METHOD) ||
	    (node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
		return_ACPI_STATUS(AE_OK);
	}

	if (!source_desc) {
		ACPI_ERROR((AE_INFO, "No object attached to node %p", node));
		return_ACPI_STATUS(AE_AML_NO_OPERAND);
	}

	/*
	 * Action is based on the type of the Node, which indicates the type
	 * of the attached object or pointer.  Where the object is handed back
	 * to the caller, its reference count is incremented first.
	 */
	switch (entry_type) {
	case ACPI_TYPE_PACKAGE:

		if (source_desc->common.type != ACPI_TYPE_PACKAGE) {
			ACPI_ERROR((AE_INFO, "Object not a Package, type %s",
				    acpi_ut_get_object_type_name(source_desc)));
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		/* Package elements may still be unparsed AML; parse them now */
		status = acpi_ds_get_package_arguments(source_desc);
		if (ACPI_SUCCESS(status)) {

			/* Return an additional reference to the object */

			obj_desc = source_desc;
			acpi_ut_add_reference(obj_desc);
		}
		break;

	case ACPI_TYPE_BUFFER:

		if (source_desc->common.type != ACPI_TYPE_BUFFER) {
			ACPI_ERROR((AE_INFO, "Object not a Buffer, type %s",
				    acpi_ut_get_object_type_name(source_desc)));
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		/* Buffer contents may still be unparsed AML; parse them now */
		status = acpi_ds_get_buffer_arguments(source_desc);
		if (ACPI_SUCCESS(status)) {

			/* Return an additional reference to the object */

			obj_desc = source_desc;
			acpi_ut_add_reference(obj_desc);
		}
		break;

	case ACPI_TYPE_STRING:

		if (source_desc->common.type != ACPI_TYPE_STRING) {
			ACPI_ERROR((AE_INFO, "Object not a String, type %s",
				    acpi_ut_get_object_type_name(source_desc)));
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		/* Return an additional reference to the object */

		obj_desc = source_desc;
		acpi_ut_add_reference(obj_desc);
		break;

	case ACPI_TYPE_INTEGER:

		if (source_desc->common.type != ACPI_TYPE_INTEGER) {
			ACPI_ERROR((AE_INFO, "Object not a Integer, type %s",
				    acpi_ut_get_object_type_name(source_desc)));
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		/* Return an additional reference to the object */

		obj_desc = source_desc;
		acpi_ut_add_reference(obj_desc);
		break;

	case ACPI_TYPE_BUFFER_FIELD:
	case ACPI_TYPE_LOCAL_REGION_FIELD:
	case ACPI_TYPE_LOCAL_BANK_FIELD:
	case ACPI_TYPE_LOCAL_INDEX_FIELD:

		/* Fields resolve to their current *value*, read via a fresh
		 * object created by the field-read (uses walk_state) */
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "FieldRead Node=%p SourceDesc=%p Type=%X\n",
				  node, source_desc, entry_type));

		status =
		    acpi_ex_read_data_from_field(walk_state, source_desc,
						 &obj_desc);
		break;

		/* For these objects, just return the object attached to the Node */

	case ACPI_TYPE_MUTEX:
	case ACPI_TYPE_POWER:
	case ACPI_TYPE_PROCESSOR:
	case ACPI_TYPE_EVENT:
	case ACPI_TYPE_REGION:

		/* Return an additional reference to the object */

		obj_desc = source_desc;
		acpi_ut_add_reference(obj_desc);
		break;

		/* TYPE_ANY is untyped, and thus there is no object associated with it */

	case ACPI_TYPE_ANY:

		ACPI_ERROR((AE_INFO,
			    "Untyped entry %p, no attached object!", node));

		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);	/* Cannot be AE_TYPE */

	case ACPI_TYPE_LOCAL_REFERENCE:

		switch (source_desc->reference.class) {
		case ACPI_REFCLASS_TABLE:	/* This is a ddb_handle */
		case ACPI_REFCLASS_REFOF:
		case ACPI_REFCLASS_INDEX:

			/* Return an additional reference to the object */

			obj_desc = source_desc;
			acpi_ut_add_reference(obj_desc);
			break;

		default:

			/* No named references are allowed here */

			ACPI_ERROR((AE_INFO,
				    "Unsupported Reference type 0x%X",
				    source_desc->reference.class));

			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}
		break;

	default:

		/* Default case is for unknown types */

		ACPI_ERROR((AE_INFO,
			    "Node %p - Unknown object type 0x%X",
			    node, entry_type));

		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);

	}			/* switch (entry_type) */

	/* Return the object descriptor */

	*object_ptr = (void *)obj_desc;
	return_ACPI_STATUS(status);
}
gpl-2.0
SM-G920P/S6-MM
drivers/acpi/acpica/exresolv.c
2138
14799
/****************************************************************************** * * Module Name: exresolv - AML Interpreter object resolution * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "amlcode.h" #include "acdispat.h" #include "acinterp.h" #include "acnamesp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exresolv") /* Local prototypes */ static acpi_status acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, struct acpi_walk_state *walk_state); /******************************************************************************* * * FUNCTION: acpi_ex_resolve_to_value * * PARAMETERS: **stack_ptr - Points to entry on obj_stack, which can * be either an (union acpi_operand_object *) * or an acpi_handle. 
 *              walk_state          - Current method state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Convert Reference objects to values. Dispatches to the
 *              operand-object resolver and/or the namespace-node resolver
 *              depending on what kind of descriptor is on the stack.
 *
 ******************************************************************************/

acpi_status
acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
			 struct acpi_walk_state *walk_state)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ex_resolve_to_value, stack_ptr);

	if (!stack_ptr || !*stack_ptr) {
		ACPI_ERROR((AE_INFO, "Internal - null pointer"));
		return_ACPI_STATUS(AE_AML_NO_OPERAND);
	}

	/*
	 * The entity pointed to by the stack_ptr can be either
	 * 1) A valid union acpi_operand_object, or
	 * 2) A struct acpi_namespace_node (named_obj)
	 */
	if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_OPERAND) {
		status = acpi_ex_resolve_object_to_value(stack_ptr, walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		/* Resolution may legally leave the stack entry NULL-free only */

		if (!*stack_ptr) {
			ACPI_ERROR((AE_INFO, "Internal - null pointer"));
			return_ACPI_STATUS(AE_AML_NO_OPERAND);
		}
	}

	/*
	 * Object on the stack may have changed if acpi_ex_resolve_object_to_value()
	 * was called (i.e., we can't use an _else_ here.)
	 */
	if (ACPI_GET_DESCRIPTOR_TYPE(*stack_ptr) == ACPI_DESC_TYPE_NAMED) {
		status =
		    acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
						  (struct acpi_namespace_node,
						   stack_ptr), walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Resolved object %p\n", *stack_ptr));
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_resolve_object_to_value
 *
 * PARAMETERS:  stack_ptr       - Pointer to an internal object
 *              walk_state      - Current method state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Retrieve the value from an internal object. The Reference type
 *              uses the associated AML opcode to determine the value.
 ******************************************************************************/

static acpi_status
acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
				struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;
	union acpi_operand_object *stack_desc;
	union acpi_operand_object *obj_desc = NULL;
	u8 ref_type;

	ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);

	stack_desc = *stack_ptr;

	/* This is an object of type union acpi_operand_object */

	switch (stack_desc->common.type) {
	case ACPI_TYPE_LOCAL_REFERENCE:

		ref_type = stack_desc->reference.class;

		switch (ref_type) {
		case ACPI_REFCLASS_LOCAL:
		case ACPI_REFCLASS_ARG:
			/*
			 * Get the local from the method's state info
			 * Note: this increments the local's object reference count
			 */
			status = acpi_ds_method_data_get_value(ref_type,
							       stack_desc->
							       reference.value,
							       walk_state,
							       &obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
					  "[Arg/Local %X] ValueObj is %p\n",
					  stack_desc->reference.value,
					  obj_desc));

			/*
			 * Now we can delete the original Reference Object and
			 * replace it with the resolved value
			 */
			acpi_ut_remove_reference(stack_desc);
			*stack_ptr = obj_desc;
			break;

		case ACPI_REFCLASS_INDEX:

			switch (stack_desc->reference.target_type) {
			case ACPI_TYPE_BUFFER_FIELD:

				/* Just return - do not dereference */
				break;

			case ACPI_TYPE_PACKAGE:

				/* If method call or copy_object - do not dereference */

				if ((walk_state->opcode ==
				     AML_INT_METHODCALL_OP)
				    || (walk_state->opcode == AML_COPY_OP)) {
					break;
				}

				/* Otherwise, dereference the package_index to a package element */

				obj_desc = *stack_desc->reference.where;
				if (obj_desc) {
					/*
					 * Valid object descriptor, copy pointer to return value
					 * (i.e., dereference the package index)
					 * Delete the ref object, increment the returned object
					 */
					acpi_ut_remove_reference(stack_desc);
					acpi_ut_add_reference(obj_desc);
					*stack_ptr = obj_desc;
				} else {
					/*
					 * A NULL object descriptor means an uninitialized element of
					 * the package, can't dereference it
					 */
					ACPI_ERROR((AE_INFO,
						    "Attempt to dereference an Index to NULL package element Idx=%p",
						    stack_desc));
					status = AE_AML_UNINITIALIZED_ELEMENT;
				}
				break;

			default:

				/* Invalid reference object */

				ACPI_ERROR((AE_INFO,
					    "Unknown TargetType 0x%X in Index/Reference object %p",
					    stack_desc->reference.target_type,
					    stack_desc));
				status = AE_AML_INTERNAL;
				break;
			}
			break;

		case ACPI_REFCLASS_REFOF:
		case ACPI_REFCLASS_DEBUG:
		case ACPI_REFCLASS_TABLE:

			/* Just leave the object as-is, do not dereference */

			break;

		case ACPI_REFCLASS_NAME:	/* Reference to a named object */

			/* Dereference the name */

			if ((stack_desc->reference.node->type ==
			     ACPI_TYPE_DEVICE)
			    || (stack_desc->reference.node->type ==
				ACPI_TYPE_THERMAL)) {

				/* These node types do not have 'real' subobjects */

				*stack_ptr = (void *)stack_desc->reference.node;
			} else {
				/* Get the object pointed to by the namespace node */

				*stack_ptr =
				    (stack_desc->reference.node)->object;
				acpi_ut_add_reference(*stack_ptr);
			}

			acpi_ut_remove_reference(stack_desc);
			break;

		default:

			ACPI_ERROR((AE_INFO,
				    "Unknown Reference type 0x%X in %p",
				    ref_type, stack_desc));
			status = AE_AML_INTERNAL;
			break;
		}
		break;

	case ACPI_TYPE_BUFFER:

		/* Ensure any deferred Buffer arguments have been evaluated */

		status = acpi_ds_get_buffer_arguments(stack_desc);
		break;

	case ACPI_TYPE_PACKAGE:

		/* Ensure any deferred Package arguments have been evaluated */

		status = acpi_ds_get_package_arguments(stack_desc);
		break;

	case ACPI_TYPE_BUFFER_FIELD:
	case ACPI_TYPE_LOCAL_REGION_FIELD:
	case ACPI_TYPE_LOCAL_BANK_FIELD:
	case ACPI_TYPE_LOCAL_INDEX_FIELD:

		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "FieldRead SourceDesc=%p Type=%X\n",
				  stack_desc, stack_desc->common.type));

		status =
		    acpi_ex_read_data_from_field(walk_state, stack_desc,
						 &obj_desc);

		/* Remove a reference to the original operand, then override */

		acpi_ut_remove_reference(*stack_ptr);
		*stack_ptr = (void *)obj_desc;
		break;

	default:
		break;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_resolve_multiple
 *
 * PARAMETERS:  walk_state          - Current state (contains AML opcode)
 *              operand             -
 *                                    Starting point for resolution
 *              return_type         - Where the object type is returned
 *              return_desc         - Where the resolved object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Return the base object and type. Traverse a reference list if
 *              necessary to get to the base object.
 *
 ******************************************************************************/

acpi_status
acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
			 union acpi_operand_object *operand,
			 acpi_object_type * return_type,
			 union acpi_operand_object **return_desc)
{
	union acpi_operand_object *obj_desc = (void *)operand;
	struct acpi_namespace_node *node;
	acpi_object_type type;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_ex_resolve_multiple);

	/* Operand can be either a namespace node or an operand descriptor */

	switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
	case ACPI_DESC_TYPE_OPERAND:

		type = obj_desc->common.type;
		break;

	case ACPI_DESC_TYPE_NAMED:

		type = ((struct acpi_namespace_node *)obj_desc)->type;
		obj_desc =
		    acpi_ns_get_attached_object((struct acpi_namespace_node *)
						obj_desc);

		/* If we had an Alias node, use the attached object for type info */

		if (type == ACPI_TYPE_LOCAL_ALIAS) {
			type = ((struct acpi_namespace_node *)obj_desc)->type;
			obj_desc =
			    acpi_ns_get_attached_object((struct
							 acpi_namespace_node *)
							obj_desc);
		}
		break;

	default:
		return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
	}

	/* If type is anything other than a reference, we are done */

	if (type != ACPI_TYPE_LOCAL_REFERENCE) {
		goto exit;
	}

	/*
	 * For reference objects created via the ref_of, Index, or Load/load_table
	 * operators, we need to get to the base object (as per the ACPI
	 * specification of the object_type and size_of operators). This means
	 * traversing the list of possibly many nested references.
	 */
	while (obj_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
		switch (obj_desc->reference.class) {
		case ACPI_REFCLASS_REFOF:
		case ACPI_REFCLASS_NAME:

			/* Dereference the reference pointer */

			if (obj_desc->reference.class == ACPI_REFCLASS_REFOF) {
				node = obj_desc->reference.object;
			} else {	/* AML_INT_NAMEPATH_OP */

				node = obj_desc->reference.node;
			}

			/* All "References" point to a NS node */

			if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
			    ACPI_DESC_TYPE_NAMED) {
				ACPI_ERROR((AE_INFO,
					    "Not a namespace node %p [%s]",
					    node,
					    acpi_ut_get_descriptor_name(node)));
				return_ACPI_STATUS(AE_AML_INTERNAL);
			}

			/* Get the attached object */

			obj_desc = acpi_ns_get_attached_object(node);
			if (!obj_desc) {

				/* No object, use the NS node type */

				type = acpi_ns_get_type(node);
				goto exit;
			}

			/* Check for circular references */

			if (obj_desc == operand) {
				return_ACPI_STATUS(AE_AML_CIRCULAR_REFERENCE);
			}
			break;

		case ACPI_REFCLASS_INDEX:

			/* Get the type of this reference (index into another object) */

			type = obj_desc->reference.target_type;
			if (type != ACPI_TYPE_PACKAGE) {
				goto exit;
			}

			/*
			 * The main object is a package, we want to get the type
			 * of the individual package element that is referenced by
			 * the index.
			 *
			 * This could of course in turn be another reference object.
			 */
			obj_desc = *(obj_desc->reference.where);
			if (!obj_desc) {

				/* NULL package elements are allowed */

				type = 0;	/* Uninitialized */
				goto exit;
			}
			break;

		case ACPI_REFCLASS_TABLE:

			type = ACPI_TYPE_DDB_HANDLE;
			goto exit;

		case ACPI_REFCLASS_LOCAL:
		case ACPI_REFCLASS_ARG:

			if (return_desc) {
				/*
				 * Note: get_value adds a reference to obj_desc;
				 * it is removed right away since only the type
				 * is needed here
				 */
				status = acpi_ds_method_data_get_value(obj_desc->
								       reference.
								       class,
								       obj_desc->
								       reference.
								       value,
								       walk_state,
								       &obj_desc);
				if (ACPI_FAILURE(status)) {
					return_ACPI_STATUS(status);
				}
				acpi_ut_remove_reference(obj_desc);
			} else {
				status = acpi_ds_method_data_get_node(obj_desc->
								      reference.
								      class,
								      obj_desc->
								      reference.
								      value,
								      walk_state,
								      &node);
				if (ACPI_FAILURE(status)) {
					return_ACPI_STATUS(status);
				}

				obj_desc = acpi_ns_get_attached_object(node);
				if (!obj_desc) {
					type = ACPI_TYPE_ANY;
					goto exit;
				}
			}
			break;

		case ACPI_REFCLASS_DEBUG:

			/* The Debug Object is of type "DebugObject" */

			type = ACPI_TYPE_DEBUG_OBJECT;
			goto exit;

		default:

			ACPI_ERROR((AE_INFO,
				    "Unknown Reference Class 0x%2.2X",
				    obj_desc->reference.class));
			return_ACPI_STATUS(AE_AML_INTERNAL);
		}
	}

	/*
	 * Now we are guaranteed to have an object that has not been created
	 * via the ref_of or Index operators.
	 */
	type = obj_desc->common.type;

exit:
	/* Convert internal types to external types */

	switch (type) {
	case ACPI_TYPE_LOCAL_REGION_FIELD:
	case ACPI_TYPE_LOCAL_BANK_FIELD:
	case ACPI_TYPE_LOCAL_INDEX_FIELD:

		type = ACPI_TYPE_FIELD_UNIT;
		break;

	case ACPI_TYPE_LOCAL_SCOPE:

		/* Per ACPI Specification, Scope is untyped */

		type = ACPI_TYPE_ANY;
		break;

	default:

		/* No change to Type required */

		break;
	}

	*return_type = type;
	if (return_desc) {
		*return_desc = obj_desc;
	}
	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
KDr2/linux
drivers/acpi/acpica/evmisc.c
2138
8760
/****************************************************************************** * * Module Name: evmisc - Miscellaneous event manager support functions * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT		ACPI_EVENTS
ACPI_MODULE_NAME("evmisc")

/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_is_notify_object
 *
 * PARAMETERS:  node                - Node to check
 *
 * RETURN:      TRUE if notifies allowed on this object
 *
 * DESCRIPTION: Check type of node for a object that supports notifies.
 *
 *              TBD: This could be replaced by a flag bit in the node.
 *
 ******************************************************************************/

u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
{
	switch (node->type) {
	case ACPI_TYPE_DEVICE:
	case ACPI_TYPE_PROCESSOR:
	case ACPI_TYPE_THERMAL:
		/*
		 * These are the ONLY objects that can receive ACPI
		 * notifications
		 */
		return (TRUE);

	default:
		return (FALSE);
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_queue_notify_request
 *
 * PARAMETERS:  node                - NS node for the notified object
 *              notify_value        - Value from the Notify() request
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Dispatch a device notification event to a previously
 *              installed handler.
 *
 ******************************************************************************/

acpi_status
acpi_ev_queue_notify_request(struct acpi_namespace_node *node, u32 notify_value)
{
	union acpi_operand_object *obj_desc;
	union acpi_operand_object *handler_list_head = NULL;
	union acpi_generic_state *info;
	u8 handler_list_id = 0;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_NAME(ev_queue_notify_request);

	/* Are Notifies allowed on this object? */

	if (!acpi_ev_is_notify_object(node)) {
		return (AE_TYPE);
	}

	/* Get the correct notify list type (System or Device) */

	if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
		handler_list_id = ACPI_SYSTEM_HANDLER_LIST;
	} else {
		handler_list_id = ACPI_DEVICE_HANDLER_LIST;
	}

	/* Get the notify object attached to the namespace Node */

	obj_desc = acpi_ns_get_attached_object(node);
	if (obj_desc) {

		/* We have an attached object, Get the correct handler list */

		handler_list_head =
		    obj_desc->common_notify.notify_list[handler_list_id];
	}

	/*
	 * If there is no notify handler (Global or Local)
	 * for this object, just ignore the notify
	 */
	if (!acpi_gbl_global_notify[handler_list_id].handler
	    && !handler_list_head) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "No notify handler for Notify, ignoring (%4.4s, %X) node %p\n",
				  acpi_ut_get_node_name(node), notify_value,
				  node));

		return (AE_OK);
	}

	/* Setup notify info and schedule the notify dispatcher */

	info = acpi_ut_create_generic_state();
	if (!info) {
		return (AE_NO_MEMORY);
	}

	info->common.descriptor_type = ACPI_DESC_TYPE_STATE_NOTIFY;

	info->notify.node = node;
	info->notify.value = (u16)notify_value;
	info->notify.handler_list_id = handler_list_id;
	info->notify.handler_list_head = handler_list_head;
	info->notify.global = &acpi_gbl_global_notify[handler_list_id];

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
			  acpi_ut_get_node_name(node),
			  acpi_ut_get_type_name(node->type), notify_value,
			  acpi_ut_get_notify_name(notify_value), node));

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_notify_dispatch, info);
	if (ACPI_FAILURE(status)) {
		/* Dispatch failed, nothing will free info - do it here */
		acpi_ut_delete_generic_state(info);
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_notify_dispatch
 *
 * PARAMETERS:  context         - To be passed to the notify handler
 *
 * RETURN:      None.
 *
 * DESCRIPTION: Dispatch a device notification event to a previously
 *              installed handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
{
	union acpi_generic_state *info = (union acpi_generic_state *)context;
	union acpi_operand_object *handler_obj;

	ACPI_FUNCTION_ENTRY();

	/* Invoke a global notify handler if installed */

	if (info->notify.global->handler) {
		info->notify.global->handler(info->notify.node,
					     info->notify.value,
					     info->notify.global->context);
	}

	/* Now invoke the local notify handler(s) if any are installed */

	handler_obj = info->notify.handler_list_head;
	while (handler_obj) {
		handler_obj->notify.handler(info->notify.node,
					    info->notify.value,
					    handler_obj->notify.context);

		handler_obj =
		    handler_obj->notify.next[info->notify.handler_list_id];
	}

	/* All done with the info object */

	acpi_ut_delete_generic_state(info);
}

#if (!ACPI_REDUCED_HARDWARE)
/******************************************************************************
 *
 * FUNCTION:    acpi_ev_terminate
 *
 * PARAMETERS:  none
 *
 * RETURN:      none
 *
 * DESCRIPTION: Disable events and free memory allocated for table storage.
 *
 ******************************************************************************/

void acpi_ev_terminate(void)
{
	u32 i;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_terminate);

	if (acpi_gbl_events_initialized) {
		/*
		 * Disable all event-related functionality. In all cases, on error,
		 * print a message but obviously we don't abort.
		 */

		/* Disable all fixed events */

		for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
			status = acpi_disable_event(i, 0);
			if (ACPI_FAILURE(status)) {
				ACPI_ERROR((AE_INFO,
					    "Could not disable fixed event %u",
					    (u32) i));
			}
		}

		/* Disable all GPEs in all GPE blocks */

		status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);

		/* Remove SCI handler */

		status = acpi_ev_remove_sci_handler();
		if (ACPI_FAILURE(status)) {
			ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
		}

		status = acpi_ev_remove_global_lock_handler();
		if (ACPI_FAILURE(status)) {
			ACPI_ERROR((AE_INFO,
				    "Could not remove Global Lock handler"));
		}
	}

	/* Deallocate all handler objects installed within GPE info structs */

	status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);

	/* Return to original mode if necessary */

	if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
		status = acpi_disable();
		if (ACPI_FAILURE(status)) {
			ACPI_WARNING((AE_INFO, "AcpiDisable failed"));
		}
	}
	return_VOID;
}

#endif				/* !ACPI_REDUCED_HARDWARE */
gpl-2.0
mydongistiny/android_kernel_motorola_shamu
drivers/net/wireless/ath/ath6kl/hif.c
2650
19017
/* * Copyright (c) 2007-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "hif.h" #include <linux/export.h> #include "core.h" #include "target.h" #include "hif-ops.h" #include "debug.h" #include "trace.h" #define MAILBOX_FOR_BLOCK_SIZE 1 #define ATH6KL_TIME_QUANTUM 10 /* in ms */ static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma) { u8 *buf; int i; buf = req->virt_dma_buf; for (i = 0; i < req->scat_entries; i++) { if (from_dma) memcpy(req->scat_list[i].buf, buf, req->scat_list[i].len); else memcpy(buf, req->scat_list[i].buf, req->scat_list[i].len); buf += req->scat_list[i].len; } return 0; } int ath6kl_hif_rw_comp_handler(void *context, int status) { struct htc_packet *packet = context; ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n", packet, status); packet->status = status; packet->completion(packet->context, packet); return 0; } EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler); #define REG_DUMP_COUNT_AR6003 60 #define REGISTER_DUMP_LEN_MAX 60 static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar) { __le32 regdump_val[REGISTER_DUMP_LEN_MAX]; u32 i, address, regdump_addr = 0; int ret; if (ar->target_type != TARGET_TYPE_AR6003) return; /* the reg dump pointer is copied to 
the host interest area */ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state)); address = TARG_VTOP(ar->target_type, address); /* read RAM location through diagnostic window */ ret = ath6kl_diag_read32(ar, address, &regdump_addr); if (ret || !regdump_addr) { ath6kl_warn("failed to get ptr to register dump area: %d\n", ret); return; } ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n", regdump_addr); regdump_addr = TARG_VTOP(ar->target_type, regdump_addr); /* fetch register dump data */ ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0], REG_DUMP_COUNT_AR6003 * (sizeof(u32))); if (ret) { ath6kl_warn("failed to get register dump: %d\n", ret); return; } ath6kl_info("crash dump:\n"); ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version, ar->wiphy->fw_version); BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4); for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) { ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n", i, le32_to_cpu(regdump_val[i]), le32_to_cpu(regdump_val[i + 1]), le32_to_cpu(regdump_val[i + 2]), le32_to_cpu(regdump_val[i + 3])); } } static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev) { u32 dummy; int ret; ath6kl_warn("firmware crashed\n"); /* * read counter to clear the interrupt, the debug error interrupt is * counter 0. 
*/ ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS, (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC); if (ret) ath6kl_warn("Failed to clear debug interrupt: %d\n", ret); ath6kl_hif_dump_fw_crash(dev->ar); ath6kl_read_fwlogs(dev->ar); ath6kl_recovery_err_notify(dev->ar, ATH6KL_FW_ASSERT); return ret; } /* mailbox recv message polling */ int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd, int timeout) { struct ath6kl_irq_proc_registers *rg; int status = 0, i; u8 htc_mbox = 1 << HTC_MAILBOX; for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) { /* this is the standard HIF way, load the reg table */ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS, (u8 *) &dev->irq_proc_reg, sizeof(dev->irq_proc_reg), HIF_RD_SYNC_BYTE_INC); if (status) { ath6kl_err("failed to read reg table\n"); return status; } /* check for MBOX data and valid lookahead */ if (dev->irq_proc_reg.host_int_status & htc_mbox) { if (dev->irq_proc_reg.rx_lkahd_valid & htc_mbox) { /* * Mailbox has a message and the look ahead * is valid. */ rg = &dev->irq_proc_reg; *lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]); break; } } /* delay a little */ mdelay(ATH6KL_TIME_QUANTUM); ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i); } if (i == 0) { ath6kl_err("timeout waiting for recv message\n"); status = -ETIME; /* check if the target asserted */ if (dev->irq_proc_reg.counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) /* * Target failure handler will be called in case of * an assert. */ ath6kl_hif_proc_dbg_intr(dev); } return status; } /* * Disable packet reception (used in case the host runs out of buffers) * using the interrupt enable registers through the host I/F */ int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx) { struct ath6kl_irq_enable_reg regs; int status = 0; ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n", enable_rx ? 
"enable" : "disable"); /* take the lock to protect interrupt enable shadows */ spin_lock_bh(&dev->lock); if (enable_rx) dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); else dev->irq_en_reg.int_status_en &= ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); memcpy(&regs, &dev->irq_en_reg, sizeof(regs)); spin_unlock_bh(&dev->lock); status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, &regs.int_status_en, sizeof(struct ath6kl_irq_enable_reg), HIF_WR_SYNC_BYTE_INC); return status; } int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev, struct hif_scatter_req *scat_req, bool read) { int status = 0; if (read) { scat_req->req = HIF_RD_SYNC_BLOCK_FIX; scat_req->addr = dev->ar->mbox_info.htc_addr; } else { scat_req->req = HIF_WR_ASYNC_BLOCK_INC; scat_req->addr = (scat_req->len > HIF_MBOX_WIDTH) ? dev->ar->mbox_info.htc_ext_addr : dev->ar->mbox_info.htc_addr; } ath6kl_dbg(ATH6KL_DBG_HIF, "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n", scat_req->scat_entries, scat_req->len, scat_req->addr, !read ? "async" : "sync", (read) ? 
"rd" : "wr"); if (!read && scat_req->virt_scat) { status = ath6kl_hif_cp_scat_dma_buf(scat_req, false); if (status) { scat_req->status = status; scat_req->complete(dev->ar->htc_target, scat_req); return 0; } } status = ath6kl_hif_scat_req_rw(dev->ar, scat_req); if (read) { /* in sync mode, we can touch the scatter request */ scat_req->status = status; if (!status && scat_req->virt_scat) scat_req->status = ath6kl_hif_cp_scat_dma_buf(scat_req, true); } return status; } static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev) { u8 counter_int_status; ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n"); counter_int_status = dev->irq_proc_reg.counter_int_status & dev->irq_en_reg.cntr_int_status_en; ath6kl_dbg(ATH6KL_DBG_IRQ, "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n", counter_int_status); /* * NOTE: other modules like GMBOX may use the counter interrupt for * credit flow control on other counters, we only need to check for * the debug assertion counter interrupt. */ if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK) return ath6kl_hif_proc_dbg_intr(dev); return 0; } static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev) { int status; u8 error_int_status; u8 reg_buf[4]; ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n"); error_int_status = dev->irq_proc_reg.error_int_status & 0x0F; if (!error_int_status) { WARN_ON(1); return -EIO; } ath6kl_dbg(ATH6KL_DBG_IRQ, "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n", error_int_status); if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status)) ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n"); if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status)) ath6kl_err("rx underflow\n"); if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status)) ath6kl_err("tx overflow\n"); /* Clear the interrupt */ dev->irq_proc_reg.error_int_status &= ~error_int_status; /* set W1C value to clear the interrupt, this hits the register first */ reg_buf[0] = error_int_status; reg_buf[1] = 0; reg_buf[2] = 0; reg_buf[3] = 0; status = 
hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS, reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); WARN_ON(status); return status; } static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev) { int status; u8 cpu_int_status; u8 reg_buf[4]; ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n"); cpu_int_status = dev->irq_proc_reg.cpu_int_status & dev->irq_en_reg.cpu_int_status_en; if (!cpu_int_status) { WARN_ON(1); return -EIO; } ath6kl_dbg(ATH6KL_DBG_IRQ, "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n", cpu_int_status); /* Clear the interrupt */ dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status; /* * Set up the register transfer buffer to hit the register 4 times , * this is done to make the access 4-byte aligned to mitigate issues * with host bus interconnects that restrict bus transfer lengths to * be a multiple of 4-bytes. */ /* set W1C value to clear the interrupt, this hits the register first */ reg_buf[0] = cpu_int_status; /* the remaining are set to zero which have no-effect */ reg_buf[1] = 0; reg_buf[2] = 0; reg_buf[3] = 0; status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS, reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); WARN_ON(status); return status; } /* process pending interrupts synchronously */ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done) { struct ath6kl_irq_proc_registers *rg; int status = 0; u8 host_int_status = 0; u32 lk_ahd = 0; u8 htc_mbox = 1 << HTC_MAILBOX; ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev); /* * NOTE: HIF implementation guarantees that the context of this * call allows us to perform SYNCHRONOUS I/O, that is we can block, * sleep or call any API that can block or switch thread/task * contexts. This is a fully schedulable context. */ /* * Process pending intr only when int_status_en is clear, it may * result in unnecessary bus transaction otherwise. Target may be * unresponsive at the time. */ if (dev->irq_en_reg.int_status_en) { /* * Read the first 28 bytes of the HTC register table. 
This * will yield us the value of different int status * registers and the lookahead registers. * * length = sizeof(int_status) + sizeof(cpu_int_status) * + sizeof(error_int_status) + * sizeof(counter_int_status) + * sizeof(mbox_frame) + sizeof(rx_lkahd_valid) * + sizeof(hole) + sizeof(rx_lkahd) + * sizeof(int_status_en) + * sizeof(cpu_int_status_en) + * sizeof(err_int_status_en) + * sizeof(cntr_int_status_en); */ status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS, (u8 *) &dev->irq_proc_reg, sizeof(dev->irq_proc_reg), HIF_RD_SYNC_BYTE_INC); if (status) goto out; ath6kl_dump_registers(dev, &dev->irq_proc_reg, &dev->irq_en_reg); trace_ath6kl_sdio_irq(&dev->irq_en_reg, sizeof(dev->irq_en_reg)); /* Update only those registers that are enabled */ host_int_status = dev->irq_proc_reg.host_int_status & dev->irq_en_reg.int_status_en; /* Look at mbox status */ if (host_int_status & htc_mbox) { /* * Mask out pending mbox value, we use "lookAhead as * the real flag for mbox processing. */ host_int_status &= ~htc_mbox; if (dev->irq_proc_reg.rx_lkahd_valid & htc_mbox) { rg = &dev->irq_proc_reg; lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]); if (!lk_ahd) ath6kl_err("lookAhead is zero!\n"); } } } if (!host_int_status && !lk_ahd) { *done = true; goto out; } if (lk_ahd) { int fetched = 0; ath6kl_dbg(ATH6KL_DBG_IRQ, "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd); /* * Mailbox Interrupt, the HTC layer may issue async * requests to empty the mailbox. When emptying the recv * mailbox we use the async handler above called from the * completion routine of the callers read request. This can * improve performance by reducing context switching when * we rapidly pull packets. */ status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt, lk_ahd, &fetched); if (status) goto out; if (!fetched) /* * HTC could not pull any messages out due to lack * of resources. 
*/ dev->htc_cnxt->chk_irq_status_cnt = 0; } /* now handle the rest of them */ ath6kl_dbg(ATH6KL_DBG_IRQ, "valid interrupt source(s) for other interrupts: 0x%x\n", host_int_status); if (MS(HOST_INT_STATUS_CPU, host_int_status)) { /* CPU Interrupt */ status = ath6kl_hif_proc_cpu_intr(dev); if (status) goto out; } if (MS(HOST_INT_STATUS_ERROR, host_int_status)) { /* Error Interrupt */ status = ath6kl_hif_proc_err_intr(dev); if (status) goto out; } if (MS(HOST_INT_STATUS_COUNTER, host_int_status)) /* Counter Interrupt */ status = ath6kl_hif_proc_counter_intr(dev); out: /* * An optimization to bypass reading the IRQ status registers * unecessarily which can re-wake the target, if upper layers * determine that we are in a low-throughput mode, we can rely on * taking another interrupt rather than re-checking the status * registers which can re-wake the target. * * NOTE : for host interfaces that makes use of detecting pending * mbox messages at hif can not use this optimization due to * possible side effects, SPI requires the host to drain all * messages from the mailbox before exiting the ISR routine. */ ath6kl_dbg(ATH6KL_DBG_IRQ, "bypassing irq status re-check, forcing done\n"); if (!dev->htc_cnxt->chk_irq_status_cnt) *done = true; ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (done:%d, status=%d\n", *done, status); return status; } /* interrupt handler, kicks off all interrupt processing */ int ath6kl_hif_intr_bh_handler(struct ath6kl *ar) { struct ath6kl_device *dev = ar->htc_target->dev; unsigned long timeout; int status = 0; bool done = false; /* * Reset counter used to flag a re-scan of IRQ status registers on * the target. */ dev->htc_cnxt->chk_irq_status_cnt = 0; /* * IRQ processing is synchronous, interrupt status registers can be * re-read. 
*/ timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT); while (time_before(jiffies, timeout) && !done) { status = proc_pending_irqs(dev, &done); if (status) break; } return status; } EXPORT_SYMBOL(ath6kl_hif_intr_bh_handler); static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev) { struct ath6kl_irq_enable_reg regs; int status; spin_lock_bh(&dev->lock); /* Enable all but ATH6KL CPU interrupts */ dev->irq_en_reg.int_status_en = SM(INT_STATUS_ENABLE_ERROR, 0x01) | SM(INT_STATUS_ENABLE_CPU, 0x01) | SM(INT_STATUS_ENABLE_COUNTER, 0x01); /* * NOTE: There are some cases where HIF can do detection of * pending mbox messages which is disabled now. */ dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01); /* Set up the CPU Interrupt status Register */ dev->irq_en_reg.cpu_int_status_en = 0; /* Set up the Error Interrupt status Register */ dev->irq_en_reg.err_int_status_en = SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) | SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1); /* * Enable Counter interrupt status register to get fatal errors for * debugging. 
*/ dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT, ATH6KL_TARGET_DEBUG_INTR_MASK); memcpy(&regs, &dev->irq_en_reg, sizeof(regs)); spin_unlock_bh(&dev->lock); status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, &regs.int_status_en, sizeof(regs), HIF_WR_SYNC_BYTE_INC); if (status) ath6kl_err("failed to update interrupt ctl reg err: %d\n", status); return status; } int ath6kl_hif_disable_intrs(struct ath6kl_device *dev) { struct ath6kl_irq_enable_reg regs; spin_lock_bh(&dev->lock); /* Disable all interrupts */ dev->irq_en_reg.int_status_en = 0; dev->irq_en_reg.cpu_int_status_en = 0; dev->irq_en_reg.err_int_status_en = 0; dev->irq_en_reg.cntr_int_status_en = 0; memcpy(&regs, &dev->irq_en_reg, sizeof(regs)); spin_unlock_bh(&dev->lock); return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS, &regs.int_status_en, sizeof(regs), HIF_WR_SYNC_BYTE_INC); } /* enable device interrupts */ int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev) { int status = 0; /* * Make sure interrupt are disabled before unmasking at the HIF * layer. The rationale here is that between device insertion * (where we clear the interrupts the first time) and when HTC * is finally ready to handle interrupts, other software can perform * target "soft" resets. The ATH6KL interrupt enables reset back to an * "enabled" state when this happens. */ ath6kl_hif_disable_intrs(dev); /* unmask the host controller interrupts */ ath6kl_hif_irq_enable(dev->ar); status = ath6kl_hif_enable_intrs(dev); return status; } /* disable all device interrupts */ int ath6kl_hif_mask_intrs(struct ath6kl_device *dev) { /* * Mask the interrupt at the HIF layer to avoid any stray interrupt * taken while we zero out our shadow registers in * ath6kl_hif_disable_intrs(). 
*/ ath6kl_hif_irq_disable(dev->ar); return ath6kl_hif_disable_intrs(dev); } int ath6kl_hif_setup(struct ath6kl_device *dev) { int status = 0; spin_lock_init(&dev->lock); /* * NOTE: we actually get the block size of a mailbox other than 0, * for SDIO the block size on mailbox 0 is artificially set to 1. * So we use the block size that is set for the other 3 mailboxes. */ dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size; /* must be a power of 2 */ if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) { WARN_ON(1); status = -EINVAL; goto fail_setup; } /* assemble mask, used for padding to a block */ dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1; ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n", dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr); status = ath6kl_hif_disable_intrs(dev); fail_setup: return status; }
gpl-2.0
yajnab/android_kernel_samsung_baffin
drivers/hwmon/acpi_power_meter.c
4186
24367
/*
 * A hwmon driver for ACPI 4.0 power meters
 * Copyright (C) 2009 IBM
 *
 * Author: Darrick J. Wong <djwong@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/kdev_t.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>

#define ACPI_POWER_METER_NAME		"power_meter"
ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
#define ACPI_POWER_METER_DEVICE_NAME	"Power Meter"
#define ACPI_POWER_METER_CLASS		"pwr_meter_resource"

#define NUM_SENSORS			17

/* _PMC capability flag bits (ACPI 4.0 power meter) */
#define POWER_METER_CAN_MEASURE	(1 << 0)
#define POWER_METER_CAN_TRIP	(1 << 1)
#define POWER_METER_CAN_CAP	(1 << 2)
#define POWER_METER_CAN_NOTIFY	(1 << 3)
#define POWER_METER_IS_BATTERY	(1 << 8)
#define UNKNOWN_HYSTERESIS	0xFFFFFFFF

/* ACPI notification event codes delivered to acpi_power_meter_notify() */
#define METER_NOTIFY_CONFIG	0x80
#define METER_NOTIFY_TRIP	0x81
#define METER_NOTIFY_CAP	0x82
#define METER_NOTIFY_CAPPING	0x83
#define METER_NOTIFY_INTERVAL	0x84

#define POWER_AVERAGE_NAME	"power1_average"
#define POWER_CAP_NAME		"power1_cap"
#define POWER_AVG_INTERVAL_NAME	"power1_average_interval"
#define POWER_ALARM_NAME	"power1_alarm"

/* set by DMI quirk when the platform enforces the cap in hardware */
static int cap_in_hardware;
/* module parameter: allow the (unsafe) software cap anyway */
static int force_cap_on;

/* Non-zero when exposing the power cap controls is considered safe. */
static int can_cap_in_hardware(void)
{
	return force_cap_on || cap_in_hardware;
}

static const struct acpi_device_id power_meter_ids[] = {
	{"ACPI000D", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, power_meter_ids);

/* Integer fields of the _PMC package, in package order. */
struct acpi_power_meter_capabilities {
	u64		flags;
	u64		units;
	u64		type;
	u64		accuracy;
	u64		sampling_time;
	u64		min_avg_interval;
	u64		max_avg_interval;
	u64		hysteresis;
	u64		configurable_cap;
	u64		min_cap;
	u64		max_cap;
};

/* Per-device driver state, stored in acpi_device->driver_data. */
struct acpi_power_meter_resource {
	struct acpi_device	*acpi_dev;
	acpi_bus_id		name;
	struct mutex		lock;		/* serialises ACPI evaluation + cached state */
	struct device		*hwmon_dev;
	struct acpi_power_meter_capabilities	caps;
	acpi_string		model_number;	/* kzalloc'd; freed by free_capabilities() */
	acpi_string		serial_number;
	acpi_string		oem_info;
	u64		power;		/* last _PMM reading, meter units */
	u64		cap;		/* last _GHL reading */
	u64		avg_interval;	/* last _GAI reading */
	int		sensors_valid;
	unsigned long	sensors_last_updated;
	struct sensor_device_attribute	sensors[NUM_SENSORS];
	int		num_sensors;
	int		trip[2];	/* [0]=min, [1]=max; -1 = unset */
	int		num_domain_devices;
	struct acpi_device	**domain_devices;
	struct kobject	*holders_dir;
};

/* Template for a read-only sysfs sensor attribute. */
struct ro_sensor_template {
	char *label;
	ssize_t (*show)(struct device *dev,
			struct device_attribute *devattr,
			char *buf);
	int index;
};

/* Template for a read-write sysfs sensor attribute. */
struct rw_sensor_template {
	char *label;
	ssize_t (*show)(struct device *dev,
			struct device_attribute *devattr,
			char *buf);
	ssize_t (*set)(struct device *dev,
		       struct device_attribute *devattr,
		       const char *buf, size_t count);
	int index;
};

/* Averaging interval */

/* Refresh resource->avg_interval from the _GAI method. */
static int update_avg_interval(struct acpi_power_meter_resource *resource)
{
	unsigned long long data;
	acpi_status status;

	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_GAI",
				       NULL, &data);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _GAI"));
		return -ENODEV;
	}

	resource->avg_interval = data;
	return 0;
}

/* sysfs show for power1_average_interval (milliseconds per _GAI). */
static ssize_t show_avg_interval(struct device *dev,
				 struct device_attribute *devattr,
				 char *buf)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;

	mutex_lock(&resource->lock);
	update_avg_interval(resource);
	mutex_unlock(&resource->lock);

	return sprintf(buf, "%llu\n", resource->avg_interval);
}

/* sysfs store for power1_average_interval: validates range, runs _PAI. */
static ssize_t set_avg_interval(struct device *dev,
				struct device_attribute *devattr,
				const char *buf, size_t count)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
	struct acpi_object_list args = { 1, &arg0 };
	int res;
	unsigned long temp;
	unsigned long long data;
	acpi_status status;

	res = strict_strtoul(buf, 10, &temp);
	if (res)
		return res;

	if (temp > resource->caps.max_avg_interval ||
	    temp < resource->caps.min_avg_interval)
		return -EINVAL;
	arg0.integer.value = temp;

	mutex_lock(&resource->lock);
	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI",
				       &args, &data);
	if (!ACPI_FAILURE(status))
		resource->avg_interval = temp;
	mutex_unlock(&resource->lock);

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PAI"));
		return -EINVAL;
	}

	/* _PAI returns 0 on success, nonzero otherwise */
	if (data)
		return -EINVAL;

	return count;
}

/* Cap functions */

/* Refresh resource->cap from the _GHL (get hardware limit) method. */
static int update_cap(struct acpi_power_meter_resource *resource)
{
	unsigned long long data;
	acpi_status status;

	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_GHL",
				       NULL, &data);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _GHL"));
		return -ENODEV;
	}

	resource->cap = data;
	return 0;
}

/* sysfs show for power1_cap; meter reports milliwatts, hwmon wants uW. */
static ssize_t show_cap(struct device *dev,
			struct device_attribute *devattr,
			char *buf)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;

	mutex_lock(&resource->lock);
	update_cap(resource);
	mutex_unlock(&resource->lock);

	return sprintf(buf, "%llu\n", resource->cap * 1000);
}

/* sysfs store for power1_cap: converts uW to meter units, runs _SHL. */
static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
		       const char *buf, size_t count)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	union acpi_object arg0 = { ACPI_TYPE_INTEGER };
	struct acpi_object_list args = { 1, &arg0 };
	int res;
	unsigned long temp;
	unsigned long long data;
	acpi_status status;

	res = strict_strtoul(buf, 10, &temp);
	if (res)
		return res;

	temp /= 1000;
	if (temp > resource->caps.max_cap || temp < resource->caps.min_cap)
		return -EINVAL;
	arg0.integer.value = temp;

	mutex_lock(&resource->lock);
	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL",
				       &args, &data);
	if (!ACPI_FAILURE(status))
		resource->cap = temp;
	mutex_unlock(&resource->lock);

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SHL"));
		return -EINVAL;
	}

	/* _SHL returns 0 on success, nonzero otherwise */
	if (data)
		return -EINVAL;

	return count;
}

/* Power meter trip points */

/*
 * Push both cached trip points to firmware via _PTP.
 * Caller must hold resource->lock.
 */
static int set_acpi_trip(struct acpi_power_meter_resource *resource)
{
	union acpi_object arg_objs[] = {
		{ACPI_TYPE_INTEGER},
		{ACPI_TYPE_INTEGER}
	};
	struct acpi_object_list args = { 2, arg_objs };
	unsigned long long data;
	acpi_status status;

	/* Both trip levels must be set */
	if (resource->trip[0] < 0 || resource->trip[1] < 0)
		return 0;

	/* This driver stores min, max; ACPI wants max, min. */
	arg_objs[0].integer.value = resource->trip[1];
	arg_objs[1].integer.value = resource->trip[0];

	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PTP",
				       &args, &data);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTP"));
		return -EINVAL;
	}

	/* _PTP returns 0 on success, nonzero otherwise */
	if (data)
		return -EINVAL;

	return 0;
}

/* sysfs store for power1_average_min/max (attr->index 7/8 selects which). */
static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	int res;
	unsigned long temp;

	res = strict_strtoul(buf, 10, &temp);
	if (res)
		return res;

	temp /= 1000;
	/*
	 * NOTE(review): temp is unsigned long, so this check can never
	 * fire -- confirm intent against upstream.
	 */
	if (temp < 0)
		return -EINVAL;

	mutex_lock(&resource->lock);
	resource->trip[attr->index - 7] = temp;
	res = set_acpi_trip(resource);
	mutex_unlock(&resource->lock);

	if (res)
		return res;

	return count;
}

/* Power meter */

/*
 * Refresh resource->power from _PMM, but no more often than the meter's
 * advertised sampling_time (cached reading reused inside that window).
 */
static int update_meter(struct acpi_power_meter_resource *resource)
{
	unsigned long long data;
	acpi_status status;
	unsigned long local_jiffies = jiffies;

	if (time_before(local_jiffies, resource->sensors_last_updated +
			msecs_to_jiffies(resource->caps.sampling_time)) &&
			resource->sensors_valid)
		return 0;

	status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PMM",
				       NULL, &data);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMM"));
		return -ENODEV;
	}

	resource->power = data;
	resource->sensors_valid = 1;
	resource->sensors_last_updated = jiffies;
	return 0;
}

/* sysfs show for power1_average (milliwatts -> microwatts). */
static ssize_t show_power(struct device *dev,
			  struct device_attribute *devattr,
			  char *buf)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;

	mutex_lock(&resource->lock);
	update_meter(resource);
	mutex_unlock(&resource->lock);

	return sprintf(buf, "%llu\n", resource->power * 1000);
}

/* Miscellaneous */

/* sysfs show for the _PMC string fields (model/serial/oem by index). */
static ssize_t show_str(struct device *dev,
			struct device_attribute *devattr,
			char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	acpi_string val;

	switch (attr->index) {
	case 0:
		val = resource->model_number;
		break;
	case 1:
		val = resource->serial_number;
		break;
	case 2:
		val = resource->oem_info;
		break;
	default:
		BUG();
	}

	return sprintf(buf, "%s\n", val);
}

/* sysfs show for the cached numeric capability/state values by index. */
static ssize_t show_val(struct device *dev,
			struct device_attribute *devattr,
			char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	u64 val = 0;

	switch (attr->index) {
	case 0:
		val = resource->caps.min_avg_interval;
		break;
	case 1:
		val = resource->caps.max_avg_interval;
		break;
	case 2:
		val = resource->caps.min_cap * 1000;
		break;
	case 3:
		val = resource->caps.max_cap * 1000;
		break;
	case 4:
		if (resource->caps.hysteresis == UNKNOWN_HYSTERESIS)
			return sprintf(buf, "unknown\n");

		val = resource->caps.hysteresis * 1000;
		break;
	case 5:
		if (resource->caps.flags & POWER_METER_IS_BATTERY)
			val = 1;
		else
			val = 0;
		break;
	case 6:
		/* power1_alarm: cached power exceeds cached cap */
		if (resource->power > resource->cap)
			val = 1;
		else
			val = 0;
		break;
	case 7:
	case 8:
		if (resource->trip[attr->index - 7] < 0)
			return sprintf(buf, "unknown\n");

		val = resource->trip[attr->index - 7] * 1000;
		break;
	default:
		BUG();
	}

	return sprintf(buf, "%llu\n", val);
}

/* sysfs show for power1_accuracy, formatted as a percentage. */
static ssize_t show_accuracy(struct device *dev,
			     struct device_attribute *devattr,
			     char *buf)
{
	struct acpi_device *acpi_dev = to_acpi_device(dev);
	struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
	unsigned int acc = resource->caps.accuracy;

	return sprintf(buf, "%u.%u%%\n", acc / 1000, acc % 1000);
}

/* sysfs "name" attribute required by the hwmon class. */
static ssize_t show_name(struct device *dev,
			 struct device_attribute *devattr,
			 char *buf)
{
	return sprintf(buf, "%s\n", ACPI_POWER_METER_NAME);
}

/* Sensor descriptions.  If you add a sensor, update NUM_SENSORS above! */
static struct ro_sensor_template meter_ro_attrs[] = {
	{POWER_AVERAGE_NAME, show_power, 0},
	{"power1_accuracy", show_accuracy, 0},
	{"power1_average_interval_min", show_val, 0},
	{"power1_average_interval_max", show_val, 1},
	{"power1_is_battery", show_val, 5},
	{NULL, NULL, 0},
};

static struct rw_sensor_template meter_rw_attrs[] = {
	{POWER_AVG_INTERVAL_NAME, show_avg_interval, set_avg_interval, 0},
	{NULL, NULL, NULL, 0},
};

static struct ro_sensor_template misc_cap_attrs[] = {
	{"power1_cap_min", show_val, 2},
	{"power1_cap_max", show_val, 3},
	{"power1_cap_hyst", show_val, 4},
	{POWER_ALARM_NAME, show_val, 6},
	{NULL, NULL, 0},
};

static struct ro_sensor_template ro_cap_attrs[] = {
	{POWER_CAP_NAME, show_cap, 0},
	{NULL, NULL, 0},
};

static struct rw_sensor_template rw_cap_attrs[] = {
	{POWER_CAP_NAME, show_cap, set_cap, 0},
	{NULL, NULL, NULL, 0},
};

static struct rw_sensor_template trip_attrs[] = {
	{"power1_average_min", show_val, set_trip, 7},
	{"power1_average_max", show_val, set_trip, 8},
	{NULL, NULL, NULL, 0},
};

static struct ro_sensor_template misc_attrs[] = {
	{"name", show_name, 0},
	{"power1_model_number", show_str, 0},
	{"power1_oem_info", show_str, 2},
	{"power1_serial_number", show_str, 1},
	{NULL, NULL, 0},
};

/* Read power domain data */

/* Undo read_domain_devices(): drop symlinks, device refs and the dir. */
static void remove_domain_devices(struct acpi_power_meter_resource *resource)
{
	int i;

	if (!resource->num_domain_devices)
		return;

	for (i = 0; i < resource->num_domain_devices; i++) {
		struct acpi_device *obj = resource->domain_devices[i];
		if (!obj)
			continue;

		sysfs_remove_link(resource->holders_dir,
				  kobject_name(&obj->dev.kobj));
		put_device(&obj->dev);
	}

	kfree(resource->domain_devices);
	kobject_put(resource->holders_dir);
	resource->num_domain_devices = 0;
}

/*
 * Evaluate _PMD and create a "measures" kobject directory with a
 * symlink per measured (domain) device.  Non-reference and unresolvable
 * package elements are silently skipped.
 */
static int read_domain_devices(struct acpi_power_meter_resource *resource)
{
	int res = 0;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pss;
	acpi_status status;

	status = acpi_evaluate_object(resource->acpi_dev->handle, "_PMD", NULL,
				      &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMD"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss ||
	    pss->type != ACPI_TYPE_PACKAGE) {
		dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME
			"Invalid _PMD data\n");
		res = -EFAULT;
		goto end;
	}

	if (!pss->package.count)
		goto end;

	resource->domain_devices = kzalloc(sizeof(struct acpi_device *) *
					   pss->package.count, GFP_KERNEL);
	if (!resource->domain_devices) {
		res = -ENOMEM;
		goto end;
	}

	resource->holders_dir = kobject_create_and_add("measures",
					&resource->acpi_dev->dev.kobj);
	if (!resource->holders_dir) {
		res = -ENOMEM;
		goto exit_free;
	}

	resource->num_domain_devices = pss->package.count;

	for (i = 0; i < pss->package.count; i++) {
		struct acpi_device *obj;
		union acpi_object *element = &(pss->package.elements[i]);

		/* Refuse non-references */
		if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
			continue;

		/* Create a symlink to domain objects */
		resource->domain_devices[i] = NULL;
		status = acpi_bus_get_device(element->reference.handle,
					     &resource->domain_devices[i]);
		if (ACPI_FAILURE(status))
			continue;

		obj = resource->domain_devices[i];
		get_device(&obj->dev);

		res = sysfs_create_link(resource->holders_dir, &obj->dev.kobj,
					kobject_name(&obj->dev.kobj));
		if (res) {
			put_device(&obj->dev);
			resource->domain_devices[i] = NULL;
		}
	}

	res = 0;
	goto end;

exit_free:
	kfree(resource->domain_devices);
end:
	kfree(buffer.pointer);
	return res;
}

/* Registration and deregistration */

/*
 * Create one read-only sysfs file per template entry, appending to
 * resource->sensors[] so remove_attrs() can tear them down again.
 */
static int register_ro_attrs(struct acpi_power_meter_resource *resource,
			     struct ro_sensor_template *ro)
{
	struct device *dev = &resource->acpi_dev->dev;
	struct sensor_device_attribute *sensors =
		&resource->sensors[resource->num_sensors];
	int res = 0;

	while (ro->label) {
		sensors->dev_attr.attr.name = ro->label;
		sensors->dev_attr.attr.mode = S_IRUGO;
		sensors->dev_attr.show = ro->show;
		sensors->index = ro->index;

		res = device_create_file(dev, &sensors->dev_attr);
		if (res) {
			/* mark slot unused so remove_attrs() skips it */
			sensors->dev_attr.attr.name = NULL;
			goto error;
		}
		sensors++;
		resource->num_sensors++;
		ro++;
	}

error:
	return res;
}

/* As register_ro_attrs(), but for read-write attributes. */
static int register_rw_attrs(struct acpi_power_meter_resource *resource,
			     struct rw_sensor_template *rw)
{
	struct device *dev = &resource->acpi_dev->dev;
	struct sensor_device_attribute *sensors =
		&resource->sensors[resource->num_sensors];
	int res = 0;

	while (rw->label) {
		sensors->dev_attr.attr.name = rw->label;
		sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
		sensors->dev_attr.show = rw->show;
		sensors->dev_attr.store = rw->set;
		sensors->index = rw->index;

		res = device_create_file(dev, &sensors->dev_attr);
		if (res) {
			/* mark slot unused so remove_attrs() skips it */
			sensors->dev_attr.attr.name = NULL;
			goto error;
		}
		sensors++;
		resource->num_sensors++;
		rw++;
	}

error:
	return res;
}

/* Remove every registered sysfs attribute and the domain-device links. */
static void remove_attrs(struct acpi_power_meter_resource *resource)
{
	int i;

	for (i = 0; i < resource->num_sensors; i++) {
		if (!resource->sensors[i].dev_attr.attr.name)
			continue;
		device_remove_file(&resource->acpi_dev->dev,
				   &resource->sensors[i].dev_attr);
	}

	remove_domain_devices(resource);

	resource->num_sensors = 0;
}

/*
 * Create all sysfs attributes appropriate for the capabilities the
 * meter advertised in _PMC.  On failure everything created so far is
 * removed.
 */
static int setup_attrs(struct acpi_power_meter_resource *resource)
{
	int res = 0;

	res = read_domain_devices(resource);
	if (res)
		return res;

	if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
		res = register_ro_attrs(resource, meter_ro_attrs);
		if (res)
			goto error;
		res = register_rw_attrs(resource, meter_rw_attrs);
		if (res)
			goto error;
	}

	if (resource->caps.flags & POWER_METER_CAN_CAP) {
		if (!can_cap_in_hardware()) {
			dev_err(&resource->acpi_dev->dev,
				"Ignoring unsafe software power cap!\n");
			goto skip_unsafe_cap;
		}

		if (resource->caps.configurable_cap) {
			res = register_rw_attrs(resource, rw_cap_attrs);
			if (res)
				goto error;
		} else {
			res = register_ro_attrs(resource, ro_cap_attrs);
			if (res)
				goto error;
		}
		res = register_ro_attrs(resource, misc_cap_attrs);
		if (res)
			goto error;
	}

skip_unsafe_cap:
	if (resource->caps.flags & POWER_METER_CAN_TRIP) {
		res = register_rw_attrs(resource, trip_attrs);
		if (res)
			goto error;
	}

	res = register_ro_attrs(resource, misc_attrs);
	if (res)
		goto error;

	return res;
error:
	remove_attrs(resource);
	return res;
}

/*
 * Free the three _PMC string copies (model/serial/oem).  Relies on the
 * three acpi_string members being laid out consecutively in the struct.
 */
static void free_capabilities(struct acpi_power_meter_resource *resource)
{
	acpi_string *str;
	int i;

	str = &resource->model_number;
	for (i = 0; i < 3; i++, str++)
		kfree(*str);
}

/*
 * Evaluate _PMC and populate resource->caps (11 integers) plus the
 * model/serial/oem strings (package elements 11..13).
 */
static int read_capabilities(struct acpi_power_meter_resource *resource)
{
	int res = 0;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer state = { 0, NULL };
	struct acpi_buffer format = { sizeof("NNNNNNNNNNN"), "NNNNNNNNNNN" };
	union acpi_object *pss;
	acpi_string *str;
	acpi_status status;

	status = acpi_evaluate_object(resource->acpi_dev->handle, "_PMC", NULL,
				      &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMC"));
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss ||
	    pss->type != ACPI_TYPE_PACKAGE ||
	    pss->package.count != 14) {
		dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME
			"Invalid _PMC data\n");
		res = -EFAULT;
		goto end;
	}

	/* Grab all the integer data at once */
	state.length = sizeof(struct acpi_power_meter_capabilities);
	state.pointer = &resource->caps;

	status = acpi_extract_package(pss, &format, &state);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Invalid data"));
		res = -EFAULT;
		goto end;
	}

	if (resource->caps.units) {
		/* units != 0 is reserved by the spec; bail out */
		dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME
			"Unknown units %llu.\n",
			resource->caps.units);
		res = -EINVAL;
		goto end;
	}

	/* Grab the string data */
	str = &resource->model_number;

	for (i = 11; i < 14; i++) {
		union acpi_object *element = &(pss->package.elements[i]);

		if (element->type != ACPI_TYPE_STRING) {
			res = -EINVAL;
			goto error;
		}

		*str = kzalloc(sizeof(u8) * (element->string.length + 1),
			       GFP_KERNEL);
		if (!*str) {
			res = -ENOMEM;
			goto error;
		}

		strncpy(*str, element->string.pointer, element->string.length);
		str++;
	}

	dev_info(&resource->acpi_dev->dev, "Found ACPI power meter.\n");
	goto end;
error:
	str = &resource->model_number;
	for (i = 0; i < 3; i++, str++)
		kfree(*str);
end:
	kfree(buffer.pointer);
	return res;
}

/* Handle ACPI event notifications */

/*
 * ACPI notify handler: refresh cached state / rebuild attributes for
 * the event, poke sysfs pollers, and relay the event over netlink.
 */
static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
{
	struct acpi_power_meter_resource *resource;
	int res;

	if (!device || !acpi_driver_data(device))
		return;

	resource = acpi_driver_data(device);

	mutex_lock(&resource->lock);
	switch (event) {
	case METER_NOTIFY_CONFIG:
		/* capabilities changed: re-read _PMC and rebuild sysfs */
		free_capabilities(resource);
		res = read_capabilities(resource);
		if (res)
			break;

		remove_attrs(resource);
		setup_attrs(resource);
		break;
	case METER_NOTIFY_TRIP:
		sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
		update_meter(resource);
		break;
	case METER_NOTIFY_CAP:
		sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME);
		update_cap(resource);
		break;
	case METER_NOTIFY_INTERVAL:
		sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME);
		update_avg_interval(resource);
		break;
	case METER_NOTIFY_CAPPING:
		sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME);
		dev_info(&device->dev, "Capping in progress.\n");
		break;
	default:
		BUG();
	}
	mutex_unlock(&resource->lock);

	acpi_bus_generate_netlink_event(ACPI_POWER_METER_CLASS,
					dev_name(&device->dev), event, 0);
}

/*
 * .add callback: allocate per-device state, read capabilities, create
 * the sysfs attributes and register with the hwmon class.
 */
static int acpi_power_meter_add(struct acpi_device *device)
{
	int res;
	struct acpi_power_meter_resource *resource;

	if (!device)
		return -EINVAL;

	resource = kzalloc(sizeof(struct acpi_power_meter_resource),
			   GFP_KERNEL);
	if (!resource)
		return -ENOMEM;

	resource->sensors_valid = 0;
	resource->acpi_dev = device;
	mutex_init(&resource->lock);
	strcpy(acpi_device_name(device), ACPI_POWER_METER_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_POWER_METER_CLASS);
	device->driver_data = resource;

	/*
	 * NOTE(review): freeing here is a no-op on the freshly-zeroed
	 * allocation (kfree(NULL) for all three strings); kept as found.
	 */
	free_capabilities(resource);
	res = read_capabilities(resource);
	if (res)
		goto exit_free;

	resource->trip[0] = resource->trip[1] = -1;

	res = setup_attrs(resource);
	if (res)
		goto exit_free;

	resource->hwmon_dev = hwmon_device_register(&device->dev);
	if (IS_ERR(resource->hwmon_dev)) {
		res = PTR_ERR(resource->hwmon_dev);
		goto exit_remove;
	}

	res = 0;
	goto exit;

exit_remove:
	remove_attrs(resource);
exit_free:
	kfree(resource);
exit:
	return res;
}

/* .remove callback: unwind everything acpi_power_meter_add() created. */
static int acpi_power_meter_remove(struct acpi_device *device, int type)
{
	struct acpi_power_meter_resource *resource;

	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	resource = acpi_driver_data(device);
	hwmon_device_unregister(resource->hwmon_dev);

	free_capabilities(resource);
	remove_attrs(resource);

	kfree(resource);
	return 0;
}

/* .resume callback: firmware state may have changed; re-read _PMC. */
static int acpi_power_meter_resume(struct acpi_device *device)
{
	struct acpi_power_meter_resource *resource;

	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	resource = acpi_driver_data(device);
	free_capabilities(resource);
	read_capabilities(resource);

	return 0;
}

static struct acpi_driver acpi_power_meter_driver = {
	.name = "power_meter",
	.class = ACPI_POWER_METER_CLASS,
	.ids = power_meter_ids,
	.ops = {
		.add = acpi_power_meter_add,
		.remove = acpi_power_meter_remove,
		.resume = acpi_power_meter_resume,
		.notify = acpi_power_meter_notify,
		},
};

/* Module init/exit routines */

/* DMI callback: this platform enforces the power cap in hardware. */
static int __init enable_cap_knobs(const struct dmi_system_id *d)
{
	cap_in_hardware = 1;
	return 0;
}

static struct dmi_system_id __initdata pm_dmi_table[] = {
	{
		enable_cap_knobs, "IBM Active Energy Manager",
		{
			DMI_MATCH(DMI_SYS_VENDOR, "IBM")
		},
	},
	{}
};

static int __init acpi_power_meter_init(void)
{
	int result;

	if (acpi_disabled)
		return -ENODEV;

	dmi_check_system(pm_dmi_table);

	result = acpi_bus_register_driver(&acpi_power_meter_driver);
	if (result < 0)
		return -ENODEV;

	return 0;
}

static void __exit acpi_power_meter_exit(void)
{
	acpi_bus_unregister_driver(&acpi_power_meter_driver);
}

MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
MODULE_DESCRIPTION("ACPI 4.0 power meter driver");
MODULE_LICENSE("GPL");

/*
 * NOTE(review): force_cap_on is declared as int but exposed as a bool
 * module parameter -- confirm against upstream (later kernels changed
 * the variable to bool).
 */
module_param(force_cap_on, bool, 0644);
MODULE_PARM_DESC(force_cap_on, "Enable power cap even it is unsafe to do so.");

module_init(acpi_power_meter_init);
module_exit(acpi_power_meter_exit);
gpl-2.0
RR-msm7x30/samsung-kernel-msm7x30-common
drivers/mfd/s5m-core.c
4698
4789
/*
 * s5m87xx.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd
 *              http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/s5m87xx/s5m-core.h>
#include <linux/mfd/s5m87xx/s5m-pmic.h>
#include <linux/mfd/s5m87xx/s5m-rtc.h>
#include <linux/regmap.h>

/* MFD sub-devices per PMIC variant */
static struct mfd_cell s5m8751_devs[] = {
	{
		.name = "s5m8751-pmic",
	}, {
		.name = "s5m-charger",
	}, {
		.name = "s5m8751-codec",
	},
};

static struct mfd_cell s5m8763_devs[] = {
	{
		.name = "s5m8763-pmic",
	}, {
		.name = "s5m-rtc",
	}, {
		.name = "s5m-charger",
	},
};

static struct mfd_cell s5m8767_devs[] = {
	{
		.name = "s5m8767-pmic",
	}, {
		.name = "s5m-rtc",
	},
};

/* Thin regmap wrappers exported for the sub-drivers. */
int s5m_reg_read(struct s5m87xx_dev *s5m87xx, u8 reg, void *dest)
{
	return regmap_read(s5m87xx->regmap, reg, dest);
}
EXPORT_SYMBOL_GPL(s5m_reg_read);

int s5m_bulk_read(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
{
	return regmap_bulk_read(s5m87xx->regmap, reg, buf, count);
}
EXPORT_SYMBOL_GPL(s5m_bulk_read);

int s5m_reg_write(struct s5m87xx_dev *s5m87xx, u8 reg, u8 value)
{
	return regmap_write(s5m87xx->regmap, reg, value);
}
EXPORT_SYMBOL_GPL(s5m_reg_write);

int s5m_bulk_write(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
{
	/* raw write: no formatting/endianness handling by regmap */
	return regmap_raw_write(s5m87xx->regmap, reg, buf, count);
}
EXPORT_SYMBOL_GPL(s5m_bulk_write);

int s5m_reg_update(struct s5m87xx_dev *s5m87xx, u8 reg, u8 val, u8 mask)
{
	return regmap_update_bits(s5m87xx->regmap, reg, mask, val);
}
EXPORT_SYMBOL_GPL(s5m_reg_update);

/* 8-bit registers, 8-bit values on the I2C bus */
static struct regmap_config s5m_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

/*
 * Probe: set up regmap, the dummy RTC I2C client and IRQs, then add
 * the MFD sub-devices matching the detected PMIC variant.
 */
static int s5m87xx_i2c_probe(struct i2c_client *i2c,
			     const struct i2c_device_id *id)
{
	struct s5m_platform_data *pdata = i2c->dev.platform_data;
	struct s5m87xx_dev *s5m87xx;
	int ret;

	s5m87xx = devm_kzalloc(&i2c->dev, sizeof(struct s5m87xx_dev),
			       GFP_KERNEL);
	if (s5m87xx == NULL)
		return -ENOMEM;

	i2c_set_clientdata(i2c, s5m87xx);
	s5m87xx->dev = &i2c->dev;
	s5m87xx->i2c = i2c;
	s5m87xx->irq = i2c->irq;
	s5m87xx->type = id->driver_data;

	if (pdata) {
		s5m87xx->device_type = pdata->device_type;
		s5m87xx->ono = pdata->ono;
		s5m87xx->irq_base = pdata->irq_base;
		s5m87xx->wakeup = pdata->wakeup;
	}

	s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config);
	if (IS_ERR(s5m87xx->regmap)) {
		ret = PTR_ERR(s5m87xx->regmap);
		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
			ret);
		/*
		 * NOTE(review): jumping to err here runs the full teardown
		 * (mfd_remove_devices, irq_exit, unregister of a NULL rtc
		 * client, regmap_exit on an ERR_PTR) before any of those
		 * were set up -- confirm against the upstream fix that
		 * returns directly at this point.
		 */
		goto err;
	}

	/* secondary address used by the on-chip RTC block */
	s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
	i2c_set_clientdata(s5m87xx->rtc, s5m87xx);

	if (pdata && pdata->cfg_pmic_irq)
		pdata->cfg_pmic_irq();

	s5m_irq_init(s5m87xx);

	pm_runtime_set_active(s5m87xx->dev);

	switch (s5m87xx->device_type) {
	case S5M8751X:
		ret = mfd_add_devices(s5m87xx->dev, -1, s5m8751_devs,
				      ARRAY_SIZE(s5m8751_devs), NULL, 0);
		break;
	case S5M8763X:
		ret = mfd_add_devices(s5m87xx->dev, -1, s5m8763_devs,
				      ARRAY_SIZE(s5m8763_devs), NULL, 0);
		break;
	case S5M8767X:
		ret = mfd_add_devices(s5m87xx->dev, -1, s5m8767_devs,
				      ARRAY_SIZE(s5m8767_devs), NULL, 0);
		break;
	default:
		/* If this happens the probe function is problem */
		BUG();
	}

	if (ret < 0)
		goto err;

	return ret;

err:
	mfd_remove_devices(s5m87xx->dev);
	s5m_irq_exit(s5m87xx);
	i2c_unregister_device(s5m87xx->rtc);
	regmap_exit(s5m87xx->regmap);
	return ret;
}

/* Remove: tear down sub-devices, IRQs, the RTC dummy client and regmap. */
static int s5m87xx_i2c_remove(struct i2c_client *i2c)
{
	struct s5m87xx_dev *s5m87xx = i2c_get_clientdata(i2c);

	mfd_remove_devices(s5m87xx->dev);
	s5m_irq_exit(s5m87xx);
	i2c_unregister_device(s5m87xx->rtc);
	regmap_exit(s5m87xx->regmap);
	return 0;
}

static const struct i2c_device_id s5m87xx_i2c_id[] = {
	{ "s5m87xx", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, s5m87xx_i2c_id);

static struct i2c_driver s5m87xx_i2c_driver = {
	.driver = {
		   .name = "s5m87xx",
		   .owner = THIS_MODULE,
	},
	.probe = s5m87xx_i2c_probe,
	.remove = s5m87xx_i2c_remove,
	.id_table = s5m87xx_i2c_id,
};

static int __init s5m87xx_i2c_init(void)
{
	return i2c_add_driver(&s5m87xx_i2c_driver);
}

/* init early so consumers of the regulators/RTC can probe after us */
subsys_initcall(s5m87xx_i2c_init);

static void __exit s5m87xx_i2c_exit(void)
{
	i2c_del_driver(&s5m87xx_i2c_driver);
}
module_exit(s5m87xx_i2c_exit);

MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
MODULE_DESCRIPTION("Core support for the S5M MFD");
MODULE_LICENSE("GPL");
gpl-2.0
zparallax/amplitude_kernel_tw
drivers/i2c/busses/i2c-diolan-u2c.c
4954
13121
/* * Driver for the Diolan u2c-12 USB-I2C adapter * * Copyright (c) 2010-2011 Ericsson AB * * Derived from: * i2c-tiny-usb.c * Copyright (C) 2006-2007 Till Harbaum (Till@Harbaum.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/i2c.h> #define DRIVER_NAME "i2c-diolan-u2c" #define USB_VENDOR_ID_DIOLAN 0x0abf #define USB_DEVICE_ID_DIOLAN_U2C 0x3370 #define DIOLAN_OUT_EP 0x02 #define DIOLAN_IN_EP 0x84 /* commands via USB, must match command ids in the firmware */ #define CMD_I2C_READ 0x01 #define CMD_I2C_WRITE 0x02 #define CMD_I2C_SCAN 0x03 /* Returns list of detected devices */ #define CMD_I2C_RELEASE_SDA 0x04 #define CMD_I2C_RELEASE_SCL 0x05 #define CMD_I2C_DROP_SDA 0x06 #define CMD_I2C_DROP_SCL 0x07 #define CMD_I2C_READ_SDA 0x08 #define CMD_I2C_READ_SCL 0x09 #define CMD_GET_FW_VERSION 0x0a #define CMD_GET_SERIAL 0x0b #define CMD_I2C_START 0x0c #define CMD_I2C_STOP 0x0d #define CMD_I2C_REPEATED_START 0x0e #define CMD_I2C_PUT_BYTE 0x0f #define CMD_I2C_GET_BYTE 0x10 #define CMD_I2C_PUT_ACK 0x11 #define CMD_I2C_GET_ACK 0x12 #define CMD_I2C_PUT_BYTE_ACK 0x13 #define CMD_I2C_GET_BYTE_ACK 0x14 #define CMD_I2C_SET_SPEED 0x1b #define CMD_I2C_GET_SPEED 0x1c #define CMD_I2C_SET_CLK_SYNC 0x24 #define CMD_I2C_GET_CLK_SYNC 0x25 #define CMD_I2C_SET_CLK_SYNC_TO 0x26 #define CMD_I2C_GET_CLK_SYNC_TO 0x27 #define RESP_OK 0x00 #define RESP_FAILED 0x01 #define RESP_BAD_MEMADDR 0x04 #define RESP_DATA_ERR 0x05 #define RESP_NOT_IMPLEMENTED 0x06 #define RESP_NACK 0x07 #define RESP_TIMEOUT 0x09 #define U2C_I2C_SPEED_FAST 0 /* 400 kHz */ #define U2C_I2C_SPEED_STD 1 /* 100 kHz */ #define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */ #define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) 
- 10) / 2 + 1) #define U2C_I2C_FREQ_FAST 400000 #define U2C_I2C_FREQ_STD 100000 #define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10)) #define DIOLAN_USB_TIMEOUT 100 /* in ms */ #define DIOLAN_SYNC_TIMEOUT 20 /* in ms */ #define DIOLAN_OUTBUF_LEN 128 #define DIOLAN_FLUSH_LEN (DIOLAN_OUTBUF_LEN - 4) #define DIOLAN_INBUF_LEN 256 /* Maximum supported receive length */ /* Structure to hold all of our device specific stuff */ struct i2c_diolan_u2c { u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */ u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */ struct usb_device *usb_dev; /* the usb device for this device */ struct usb_interface *interface;/* the interface for this device */ struct i2c_adapter adapter; /* i2c related things */ int olen; /* Output buffer length */ int ocount; /* Number of enqueued messages */ }; static uint frequency = U2C_I2C_FREQ_STD; /* I2C clock frequency in Hz */ module_param(frequency, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz"); /* usb layer */ /* Send command to device, and get response. */ static int diolan_usb_transfer(struct i2c_diolan_u2c *dev) { int ret = 0; int actual; int i; if (!dev->olen || !dev->ocount) return -EINVAL; ret = usb_bulk_msg(dev->usb_dev, usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP), dev->obuffer, dev->olen, &actual, DIOLAN_USB_TIMEOUT); if (!ret) { for (i = 0; i < dev->ocount; i++) { int tmpret; tmpret = usb_bulk_msg(dev->usb_dev, usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); /* * Stop command processing if a previous command * returned an error. * Note that we still need to retrieve all messages. */ if (ret < 0) continue; ret = tmpret; if (ret == 0 && actual > 0) { switch (dev->ibuffer[actual - 1]) { case RESP_NACK: /* * Return ENXIO if NACK was received as * response to the address phase, * EIO otherwise */ ret = i == 1 ? 
-ENXIO : -EIO; break; case RESP_TIMEOUT: ret = -ETIMEDOUT; break; case RESP_OK: /* strip off return code */ ret = actual - 1; break; default: ret = -EIO; break; } } } } dev->olen = 0; dev->ocount = 0; return ret; } static int diolan_write_cmd(struct i2c_diolan_u2c *dev, bool flush) { if (flush || dev->olen >= DIOLAN_FLUSH_LEN) return diolan_usb_transfer(dev); return 0; } /* Send command (no data) */ static int diolan_usb_cmd(struct i2c_diolan_u2c *dev, u8 command, bool flush) { dev->obuffer[dev->olen++] = command; dev->ocount++; return diolan_write_cmd(dev, flush); } /* Send command with one byte of data */ static int diolan_usb_cmd_data(struct i2c_diolan_u2c *dev, u8 command, u8 data, bool flush) { dev->obuffer[dev->olen++] = command; dev->obuffer[dev->olen++] = data; dev->ocount++; return diolan_write_cmd(dev, flush); } /* Send command with two bytes of data */ static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1, u8 d2, bool flush) { dev->obuffer[dev->olen++] = command; dev->obuffer[dev->olen++] = d1; dev->obuffer[dev->olen++] = d2; dev->ocount++; return diolan_write_cmd(dev, flush); } /* * Flush input queue. * If we don't do this at startup and the controller has queued up * messages which were not retrieved, it will stop responding * at some point. 
*/ static void diolan_flush_input(struct i2c_diolan_u2c *dev) { int i; for (i = 0; i < 10; i++) { int actual = 0; int ret; ret = usb_bulk_msg(dev->usb_dev, usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP), dev->ibuffer, sizeof(dev->ibuffer), &actual, DIOLAN_USB_TIMEOUT); if (ret < 0 || actual == 0) break; } if (i == 10) dev_err(&dev->interface->dev, "Failed to flush input buffer\n"); } static int diolan_i2c_start(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_START, false); } static int diolan_i2c_repeated_start(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_REPEATED_START, false); } static int diolan_i2c_stop(struct i2c_diolan_u2c *dev) { return diolan_usb_cmd(dev, CMD_I2C_STOP, true); } static int diolan_i2c_get_byte_ack(struct i2c_diolan_u2c *dev, bool ack, u8 *byte) { int ret; ret = diolan_usb_cmd_data(dev, CMD_I2C_GET_BYTE_ACK, ack, true); if (ret > 0) *byte = dev->ibuffer[0]; else if (ret == 0) ret = -EIO; return ret; } static int diolan_i2c_put_byte_ack(struct i2c_diolan_u2c *dev, u8 byte) { return diolan_usb_cmd_data(dev, CMD_I2C_PUT_BYTE_ACK, byte, false); } static int diolan_set_speed(struct i2c_diolan_u2c *dev, u8 speed) { return diolan_usb_cmd_data(dev, CMD_I2C_SET_SPEED, speed, true); } /* Enable or disable clock synchronization (stretching) */ static int diolan_set_clock_synch(struct i2c_diolan_u2c *dev, bool enable) { return diolan_usb_cmd_data(dev, CMD_I2C_SET_CLK_SYNC, enable, true); } /* Set clock synchronization timeout in ms */ static int diolan_set_clock_synch_timeout(struct i2c_diolan_u2c *dev, int ms) { int to_val = ms * 10; return diolan_usb_cmd_data2(dev, CMD_I2C_SET_CLK_SYNC_TO, to_val & 0xff, (to_val >> 8) & 0xff, true); } static void diolan_fw_version(struct i2c_diolan_u2c *dev) { int ret; ret = diolan_usb_cmd(dev, CMD_GET_FW_VERSION, true); if (ret >= 2) dev_info(&dev->interface->dev, "Diolan U2C firmware version %u.%u\n", (unsigned int)dev->ibuffer[0], (unsigned int)dev->ibuffer[1]); } static void 
diolan_get_serial(struct i2c_diolan_u2c *dev) { int ret; u32 serial; ret = diolan_usb_cmd(dev, CMD_GET_SERIAL, true); if (ret >= 4) { serial = le32_to_cpu(*(u32 *)dev->ibuffer); dev_info(&dev->interface->dev, "Diolan U2C serial number %u\n", serial); } } static int diolan_init(struct i2c_diolan_u2c *dev) { int speed, ret; if (frequency >= 200000) { speed = U2C_I2C_SPEED_FAST; frequency = U2C_I2C_FREQ_FAST; } else if (frequency >= 100000 || frequency == 0) { speed = U2C_I2C_SPEED_STD; frequency = U2C_I2C_FREQ_STD; } else { speed = U2C_I2C_SPEED(frequency); if (speed > U2C_I2C_SPEED_2KHZ) speed = U2C_I2C_SPEED_2KHZ; frequency = U2C_I2C_FREQ(speed); } dev_info(&dev->interface->dev, "Diolan U2C at USB bus %03d address %03d speed %d Hz\n", dev->usb_dev->bus->busnum, dev->usb_dev->devnum, frequency); diolan_flush_input(dev); diolan_fw_version(dev); diolan_get_serial(dev); /* Set I2C speed */ ret = diolan_set_speed(dev, speed); if (ret < 0) return ret; /* Configure I2C clock synchronization */ ret = diolan_set_clock_synch(dev, speed != U2C_I2C_SPEED_FAST); if (ret < 0) return ret; if (speed != U2C_I2C_SPEED_FAST) ret = diolan_set_clock_synch_timeout(dev, DIOLAN_SYNC_TIMEOUT); return ret; } /* i2c layer */ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct i2c_diolan_u2c *dev = i2c_get_adapdata(adapter); struct i2c_msg *pmsg; int i, j; int ret, sret; ret = diolan_i2c_start(dev); if (ret < 0) return ret; for (i = 0; i < num; i++) { pmsg = &msgs[i]; if (i) { ret = diolan_i2c_repeated_start(dev); if (ret < 0) goto abort; } if (pmsg->flags & I2C_M_RD) { ret = diolan_i2c_put_byte_ack(dev, (pmsg->addr << 1) | 1); if (ret < 0) goto abort; for (j = 0; j < pmsg->len; j++) { u8 byte; bool ack = j < pmsg->len - 1; /* * Don't send NACK if this is the first byte * of a SMBUS_BLOCK message. 
*/ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) ack = true; ret = diolan_i2c_get_byte_ack(dev, ack, &byte); if (ret < 0) goto abort; /* * Adjust count if first received byte is length */ if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) { if (byte == 0 || byte > I2C_SMBUS_BLOCK_MAX) { ret = -EPROTO; goto abort; } pmsg->len += byte; } pmsg->buf[j] = byte; } } else { ret = diolan_i2c_put_byte_ack(dev, pmsg->addr << 1); if (ret < 0) goto abort; for (j = 0; j < pmsg->len; j++) { ret = diolan_i2c_put_byte_ack(dev, pmsg->buf[j]); if (ret < 0) goto abort; } } } abort: sret = diolan_i2c_stop(dev); if (sret < 0 && ret >= 0) ret = sret; return ret; } /* * Return list of supported functionality. */ static u32 diolan_usb_func(struct i2c_adapter *a) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm diolan_usb_algorithm = { .master_xfer = diolan_usb_xfer, .functionality = diolan_usb_func, }; /* device layer */ static const struct usb_device_id diolan_u2c_table[] = { { USB_DEVICE(USB_VENDOR_ID_DIOLAN, USB_DEVICE_ID_DIOLAN_U2C) }, { } }; MODULE_DEVICE_TABLE(usb, diolan_u2c_table); static void diolan_u2c_free(struct i2c_diolan_u2c *dev) { usb_put_dev(dev->usb_dev); kfree(dev); } static int diolan_u2c_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct i2c_diolan_u2c *dev; int ret; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { dev_err(&interface->dev, "no memory for device state\n"); ret = -ENOMEM; goto error; } dev->usb_dev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* setup i2c adapter description */ dev->adapter.owner = THIS_MODULE; dev->adapter.class = I2C_CLASS_HWMON; dev->adapter.algo = &diolan_usb_algorithm; i2c_set_adapdata(&dev->adapter, dev); 
snprintf(dev->adapter.name, sizeof(dev->adapter.name), DRIVER_NAME " at bus %03d device %03d", dev->usb_dev->bus->busnum, dev->usb_dev->devnum); dev->adapter.dev.parent = &dev->interface->dev; /* initialize diolan i2c interface */ ret = diolan_init(dev); if (ret < 0) { dev_err(&interface->dev, "failed to initialize adapter\n"); goto error_free; } /* and finally attach to i2c layer */ ret = i2c_add_adapter(&dev->adapter); if (ret < 0) { dev_err(&interface->dev, "failed to add I2C adapter\n"); goto error_free; } dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n"); return 0; error_free: usb_set_intfdata(interface, NULL); diolan_u2c_free(dev); error: return ret; } static void diolan_u2c_disconnect(struct usb_interface *interface) { struct i2c_diolan_u2c *dev = usb_get_intfdata(interface); i2c_del_adapter(&dev->adapter); usb_set_intfdata(interface, NULL); diolan_u2c_free(dev); dev_dbg(&interface->dev, "disconnected\n"); } static struct usb_driver diolan_u2c_driver = { .name = DRIVER_NAME, .probe = diolan_u2c_probe, .disconnect = diolan_u2c_disconnect, .id_table = diolan_u2c_table, }; module_usb_driver(diolan_u2c_driver); MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>"); MODULE_DESCRIPTION(DRIVER_NAME " driver"); MODULE_LICENSE("GPL");
gpl-2.0
OptimusG-Dev-Team/caf-clean-kernel
arch/x86/pci/pcbios.c
4954
11174
/* * BIOS32 and PCI BIOS handling. */ #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/uaccess.h> #include <asm/pci_x86.h> #include <asm/pci-functions.h> #include <asm/cacheflush.h> /* BIOS32 signature: "_32_" */ #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) /* PCI signature: "PCI " */ #define PCI_SIGNATURE (('P' << 0) + ('C' << 8) + ('I' << 16) + (' ' << 24)) /* PCI service signature: "$PCI" */ #define PCI_SERVICE (('$' << 0) + ('P' << 8) + ('C' << 16) + ('I' << 24)) /* PCI BIOS hardware mechanism flags */ #define PCIBIOS_HW_TYPE1 0x01 #define PCIBIOS_HW_TYPE2 0x02 #define PCIBIOS_HW_TYPE1_SPEC 0x10 #define PCIBIOS_HW_TYPE2_SPEC 0x20 int pcibios_enabled; /* According to the BIOS specification at: * http://members.datafast.net.au/dft0802/specs/bios21.pdf, we could * restrict the x zone to some pages and make it ro. But this may be * broken on some bios, complex to handle with static_protections. * We could make the 0xe0000-0x100000 range rox, but this can break * some ISA mapping. * * So we let's an rw and x hole when pcibios is used. This shouldn't * happen for modern system with mmconfig, and if you don't want it * you could disable pcibios... */ static inline void set_bios_x(void) { pcibios_enabled = 1; set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT); if (__supported_pte_mask & _PAGE_NX) printk(KERN_INFO "PCI : PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n"); } /* * This is the standard structure used to identify the entry point * to the BIOS32 Service Directory, as documented in * Standard BIOS 32-bit Service Directory Proposal * Revision 0.4 May 24, 1993 * Phoenix Technologies Ltd. * Norwood, MA * and the PCI BIOS specification. 
*/ union bios32 { struct { unsigned long signature; /* _32_ */ unsigned long entry; /* 32 bit physical address */ unsigned char revision; /* Revision level, 0 */ unsigned char length; /* Length in paragraphs should be 01 */ unsigned char checksum; /* All bytes must add up to zero */ unsigned char reserved[5]; /* Must be zero */ } fields; char chars[16]; }; /* * Physical address of the service directory. I don't know if we're * allowed to have more than one of these or not, so just in case * we'll make pcibios_present() take a memory start parameter and store * the array there. */ static struct { unsigned long address; unsigned short segment; } bios32_indirect = { 0, __KERNEL_CS }; /* * Returns the entry point for the given service, NULL on error */ static unsigned long bios32_service(unsigned long service) { unsigned char return_code; /* %al */ unsigned long address; /* %ebx */ unsigned long length; /* %ecx */ unsigned long entry; /* %edx */ unsigned long flags; local_irq_save(flags); __asm__("lcall *(%%edi); cld" : "=a" (return_code), "=b" (address), "=c" (length), "=d" (entry) : "0" (service), "1" (0), "D" (&bios32_indirect)); local_irq_restore(flags); switch (return_code) { case 0: return address + entry; case 0x80: /* Not present */ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); return 0; default: /* Shouldn't happen */ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", service, return_code); return 0; } } static struct { unsigned long address; unsigned short segment; } pci_indirect = { 0, __KERNEL_CS }; static int pci_bios_present; static int __devinit check_pcibios(void) { u32 signature, eax, ebx, ecx; u8 status, major_ver, minor_ver, hw_mech; unsigned long flags, pcibios_entry; if ((pcibios_entry = bios32_service(PCI_SERVICE))) { pci_indirect.address = pcibios_entry + PAGE_OFFSET; local_irq_save(flags); __asm__( "lcall *(%%edi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" : "=d" (signature), "=a" (eax), "=b" 
(ebx), "=c" (ecx) : "1" (PCIBIOS_PCI_BIOS_PRESENT), "D" (&pci_indirect) : "memory"); local_irq_restore(flags); status = (eax >> 8) & 0xff; hw_mech = eax & 0xff; major_ver = (ebx >> 8) & 0xff; minor_ver = ebx & 0xff; if (pcibios_last_bus < 0) pcibios_last_bus = ecx & 0xff; DBG("PCI: BIOS probe returned s=%02x hw=%02x ver=%02x.%02x l=%02x\n", status, hw_mech, major_ver, minor_ver, pcibios_last_bus); if (status || signature != PCI_SIGNATURE) { printk (KERN_ERR "PCI: BIOS BUG #%x[%08x] found\n", status, signature); return 0; } printk(KERN_INFO "PCI: PCI BIOS revision %x.%02x entry at 0x%lx, last bus=%d\n", major_ver, minor_ver, pcibios_entry, pcibios_last_bus); #ifdef CONFIG_PCI_DIRECT if (!(hw_mech & PCIBIOS_HW_TYPE1)) pci_probe &= ~PCI_PROBE_CONF1; if (!(hw_mech & PCIBIOS_HW_TYPE2)) pci_probe &= ~PCI_PROBE_CONF2; #endif return 1; } return 0; } static int pci_bios_read(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 *value) { unsigned long result = 0; unsigned long flags; unsigned long bx = (bus << 8) | devfn; WARN_ON(seg); if (!value || (bus > 255) || (devfn > 255) || (reg > 255)) return -EINVAL; raw_spin_lock_irqsave(&pci_config_lock, flags); switch (len) { case 1: __asm__("lcall *(%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" : "=c" (*value), "=a" (result) : "1" (PCIBIOS_READ_CONFIG_BYTE), "b" (bx), "D" ((long)reg), "S" (&pci_indirect)); /* * Zero-extend the result beyond 8 bits, do not trust the * BIOS having done it: */ *value &= 0xff; break; case 2: __asm__("lcall *(%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" : "=c" (*value), "=a" (result) : "1" (PCIBIOS_READ_CONFIG_WORD), "b" (bx), "D" ((long)reg), "S" (&pci_indirect)); /* * Zero-extend the result beyond 16 bits, do not trust the * BIOS having done it: */ *value &= 0xffff; break; case 4: __asm__("lcall *(%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" : "=c" (*value), "=a" (result) : "1" (PCIBIOS_READ_CONFIG_DWORD), "b" (bx), "D" ((long)reg), "S" 
(&pci_indirect)); break; } raw_spin_unlock_irqrestore(&pci_config_lock, flags); return (int)((result & 0xff00) >> 8); } static int pci_bios_write(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 value) { unsigned long result = 0; unsigned long flags; unsigned long bx = (bus << 8) | devfn; WARN_ON(seg); if ((bus > 255) || (devfn > 255) || (reg > 255)) return -EINVAL; raw_spin_lock_irqsave(&pci_config_lock, flags); switch (len) { case 1: __asm__("lcall *(%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" : "=a" (result) : "0" (PCIBIOS_WRITE_CONFIG_BYTE), "c" (value), "b" (bx), "D" ((long)reg), "S" (&pci_indirect)); break; case 2: __asm__("lcall *(%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" : "=a" (result) : "0" (PCIBIOS_WRITE_CONFIG_WORD), "c" (value), "b" (bx), "D" ((long)reg), "S" (&pci_indirect)); break; case 4: __asm__("lcall *(%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" : "=a" (result) : "0" (PCIBIOS_WRITE_CONFIG_DWORD), "c" (value), "b" (bx), "D" ((long)reg), "S" (&pci_indirect)); break; } raw_spin_unlock_irqrestore(&pci_config_lock, flags); return (int)((result & 0xff00) >> 8); } /* * Function table for BIOS32 access */ static const struct pci_raw_ops pci_bios_access = { .read = pci_bios_read, .write = pci_bios_write }; /* * Try to find PCI BIOS. */ static const struct pci_raw_ops * __devinit pci_find_bios(void) { union bios32 *check; unsigned char sum; int i, length; /* * Follow the standard procedure for locating the BIOS32 Service * directory by scanning the permissible address range from * 0xe0000 through 0xfffff for a valid BIOS32 structure. 
*/ for (check = (union bios32 *) __va(0xe0000); check <= (union bios32 *) __va(0xffff0); ++check) { long sig; if (probe_kernel_address(&check->fields.signature, sig)) continue; if (check->fields.signature != BIOS32_SIGNATURE) continue; length = check->fields.length * 16; if (!length) continue; sum = 0; for (i = 0; i < length ; ++i) sum += check->chars[i]; if (sum != 0) continue; if (check->fields.revision != 0) { printk("PCI: unsupported BIOS32 revision %d at 0x%p\n", check->fields.revision, check); continue; } DBG("PCI: BIOS32 Service Directory structure at 0x%p\n", check); if (check->fields.entry >= 0x100000) { printk("PCI: BIOS32 entry (0x%p) in high memory, " "cannot use.\n", check); return NULL; } else { unsigned long bios32_entry = check->fields.entry; DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n", bios32_entry); bios32_indirect.address = bios32_entry + PAGE_OFFSET; set_bios_x(); if (check_pcibios()) return &pci_bios_access; } break; /* Hopefully more than one BIOS32 cannot happen... */ } return NULL; } /* * BIOS Functions for IRQ Routing */ struct irq_routing_options { u16 size; struct irq_info *table; u16 segment; } __attribute__((packed)); struct irq_routing_table * pcibios_get_irq_routing_table(void) { struct irq_routing_options opt; struct irq_routing_table *rt = NULL; int ret, map; unsigned long page; if (!pci_bios_present) return NULL; page = __get_free_page(GFP_KERNEL); if (!page) return NULL; opt.table = (struct irq_info *) page; opt.size = PAGE_SIZE; opt.segment = __KERNEL_DS; DBG("PCI: Fetching IRQ routing table... 
"); __asm__("push %%es\n\t" "push %%ds\n\t" "pop %%es\n\t" "lcall *(%%esi); cld\n\t" "pop %%es\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" : "=a" (ret), "=b" (map), "=m" (opt) : "0" (PCIBIOS_GET_ROUTING_OPTIONS), "1" (0), "D" ((long) &opt), "S" (&pci_indirect), "m" (opt) : "memory"); DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); if (ret & 0xff00) printk(KERN_ERR "PCI: Error %02x when fetching IRQ routing table.\n", (ret >> 8) & 0xff); else if (opt.size) { rt = kmalloc(sizeof(struct irq_routing_table) + opt.size, GFP_KERNEL); if (rt) { memset(rt, 0, sizeof(struct irq_routing_table)); rt->size = opt.size + sizeof(struct irq_routing_table); rt->exclusive_irqs = map; memcpy(rt->slots, (void *) page, opt.size); printk(KERN_INFO "PCI: Using BIOS Interrupt Routing Table\n"); } } free_page(page); return rt; } EXPORT_SYMBOL(pcibios_get_irq_routing_table); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq) { int ret; __asm__("lcall *(%%esi); cld\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" : "=a" (ret) : "0" (PCIBIOS_SET_PCI_HW_INT), "b" ((dev->bus->number << 8) | dev->devfn), "c" ((irq << 8) | (pin + 10)), "S" (&pci_indirect)); return !(ret & 0xff00); } EXPORT_SYMBOL(pcibios_set_irq_routing); void __init pci_pcbios_init(void) { if ((pci_probe & PCI_PROBE_BIOS) && ((raw_pci_ops = pci_find_bios()))) { pci_bios_present = 1; } }
gpl-2.0
p2pjack/furnace_kernel_lge_mako
drivers/input/serio/ct82c710.c
8282
6791
/* * Copyright (c) 1999-2001 Vojtech Pavlik */ /* * 82C710 C&T mouse port chip driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/delay.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/serio.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/io.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("82C710 C&T mouse port chip driver"); MODULE_LICENSE("GPL"); /* * ct82c710 interface */ #define CT82C710_DEV_IDLE 0x01 /* Device Idle */ #define CT82C710_RX_FULL 0x02 /* Device Char received */ #define CT82C710_TX_IDLE 0x04 /* Device XMIT Idle */ #define CT82C710_RESET 0x08 /* Device Reset */ #define CT82C710_INTS_ON 0x10 /* Device Interrupt On */ #define CT82C710_ERROR_FLAG 0x20 /* Device Error */ #define CT82C710_CLEAR 0x40 /* Device Clear */ #define CT82C710_ENABLE 0x80 /* Device Enable */ #define CT82C710_IRQ 12 #define CT82C710_DATA ct82c710_iores.start #define CT82C710_STATUS (ct82c710_iores.start + 1) static struct serio 
*ct82c710_port; static struct platform_device *ct82c710_device; static struct resource ct82c710_iores; /* * Interrupt handler for the 82C710 mouse port. A character * is waiting in the 82C710. */ static irqreturn_t ct82c710_interrupt(int cpl, void *dev_id) { return serio_interrupt(ct82c710_port, inb(CT82C710_DATA), 0); } /* * Wait for device to send output char and flush any input char. */ static int ct82c170_wait(void) { int timeout = 60000; while ((inb(CT82C710_STATUS) & (CT82C710_RX_FULL | CT82C710_TX_IDLE | CT82C710_DEV_IDLE)) != (CT82C710_DEV_IDLE | CT82C710_TX_IDLE) && timeout) { if (inb_p(CT82C710_STATUS) & CT82C710_RX_FULL) inb_p(CT82C710_DATA); udelay(1); timeout--; } return !timeout; } static void ct82c710_close(struct serio *serio) { if (ct82c170_wait()) printk(KERN_WARNING "ct82c710.c: Device busy in close()\n"); outb_p(inb_p(CT82C710_STATUS) & ~(CT82C710_ENABLE | CT82C710_INTS_ON), CT82C710_STATUS); if (ct82c170_wait()) printk(KERN_WARNING "ct82c710.c: Device busy in close()\n"); free_irq(CT82C710_IRQ, NULL); } static int ct82c710_open(struct serio *serio) { unsigned char status; int err; err = request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL); if (err) return err; status = inb_p(CT82C710_STATUS); status |= (CT82C710_ENABLE | CT82C710_RESET); outb_p(status, CT82C710_STATUS); status &= ~(CT82C710_RESET); outb_p(status, CT82C710_STATUS); status |= CT82C710_INTS_ON; outb_p(status, CT82C710_STATUS); /* Enable interrupts */ while (ct82c170_wait()) { printk(KERN_ERR "ct82c710: Device busy in open()\n"); status &= ~(CT82C710_ENABLE | CT82C710_INTS_ON); outb_p(status, CT82C710_STATUS); free_irq(CT82C710_IRQ, NULL); return -EBUSY; } return 0; } /* * Write to the 82C710 mouse device. */ static int ct82c710_write(struct serio *port, unsigned char c) { if (ct82c170_wait()) return -1; outb_p(c, CT82C710_DATA); return 0; } /* * See if we can find a 82C710 device. Read mouse address. 
*/ static int __init ct82c710_detect(void) { outb_p(0x55, 0x2fa); /* Any value except 9, ff or 36 */ outb_p(0xaa, 0x3fa); /* Inverse of 55 */ outb_p(0x36, 0x3fa); /* Address the chip */ outb_p(0xe4, 0x3fa); /* 390/4; 390 = config address */ outb_p(0x1b, 0x2fa); /* Inverse of e4 */ outb_p(0x0f, 0x390); /* Write index */ if (inb_p(0x391) != 0xe4) /* Config address found? */ return -ENODEV; /* No: no 82C710 here */ outb_p(0x0d, 0x390); /* Write index */ ct82c710_iores.start = inb_p(0x391) << 2; /* Get mouse I/O address */ ct82c710_iores.end = ct82c710_iores.start + 1; ct82c710_iores.flags = IORESOURCE_IO; outb_p(0x0f, 0x390); outb_p(0x0f, 0x391); /* Close config mode */ return 0; } static int __devinit ct82c710_probe(struct platform_device *dev) { ct82c710_port = kzalloc(sizeof(struct serio), GFP_KERNEL); if (!ct82c710_port) return -ENOMEM; ct82c710_port->id.type = SERIO_8042; ct82c710_port->dev.parent = &dev->dev; ct82c710_port->open = ct82c710_open; ct82c710_port->close = ct82c710_close; ct82c710_port->write = ct82c710_write; strlcpy(ct82c710_port->name, "C&T 82c710 mouse port", sizeof(ct82c710_port->name)); snprintf(ct82c710_port->phys, sizeof(ct82c710_port->phys), "isa%16llx/serio0", (unsigned long long)CT82C710_DATA); serio_register_port(ct82c710_port); printk(KERN_INFO "serio: C&T 82c710 mouse port at %#llx irq %d\n", (unsigned long long)CT82C710_DATA, CT82C710_IRQ); return 0; } static int __devexit ct82c710_remove(struct platform_device *dev) { serio_unregister_port(ct82c710_port); return 0; } static struct platform_driver ct82c710_driver = { .driver = { .name = "ct82c710", .owner = THIS_MODULE, }, .probe = ct82c710_probe, .remove = __devexit_p(ct82c710_remove), }; static int __init ct82c710_init(void) { int error; error = ct82c710_detect(); if (error) return error; error = platform_driver_register(&ct82c710_driver); if (error) return error; ct82c710_device = platform_device_alloc("ct82c710", -1); if (!ct82c710_device) { error = -ENOMEM; goto 
err_unregister_driver; } error = platform_device_add_resources(ct82c710_device, &ct82c710_iores, 1); if (error) goto err_free_device; error = platform_device_add(ct82c710_device); if (error) goto err_free_device; return 0; err_free_device: platform_device_put(ct82c710_device); err_unregister_driver: platform_driver_unregister(&ct82c710_driver); return error; } static void __exit ct82c710_exit(void) { platform_device_unregister(ct82c710_device); platform_driver_unregister(&ct82c710_driver); } module_init(ct82c710_init); module_exit(ct82c710_exit);
gpl-2.0
dedzt16/dedzt16
arch/powerpc/platforms/52xx/lite5200_pm.c
9050
6385
#include <linux/init.h> #include <linux/suspend.h> #include <asm/io.h> #include <asm/time.h> #include <asm/mpc52xx.h> #include <asm/switch_to.h> /* defined in lite5200_sleep.S and only used here */ extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar); static struct mpc52xx_cdm __iomem *cdm; static struct mpc52xx_intr __iomem *pic; static struct mpc52xx_sdma __iomem *bes; static struct mpc52xx_xlb __iomem *xlb; static struct mpc52xx_gpio __iomem *gps; static struct mpc52xx_gpio_wkup __iomem *gpw; static void __iomem *pci; static void __iomem *sram; static const int sram_size = 0x4000; /* 16 kBytes */ static void __iomem *mbar; static suspend_state_t lite5200_pm_target_state; static int lite5200_pm_valid(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: return 1; default: return 0; } } static int lite5200_pm_begin(suspend_state_t state) { if (lite5200_pm_valid(state)) { lite5200_pm_target_state = state; return 0; } return -EINVAL; } static int lite5200_pm_prepare(void) { struct device_node *np; const struct of_device_id immr_ids[] = { { .compatible = "fsl,mpc5200-immr", }, { .compatible = "fsl,mpc5200b-immr", }, { .type = "soc", .compatible = "mpc5200", }, /* lite5200 */ { .type = "builtin", .compatible = "mpc5200", }, /* efika */ {} }; u64 regaddr64 = 0; const u32 *regaddr_p; /* deep sleep? 
let mpc52xx code handle that */ if (lite5200_pm_target_state == PM_SUSPEND_STANDBY) return mpc52xx_pm_prepare(); if (lite5200_pm_target_state != PM_SUSPEND_MEM) return -EINVAL; /* map registers */ np = of_find_matching_node(NULL, immr_ids); regaddr_p = of_get_address(np, 0, NULL, NULL); if (regaddr_p) regaddr64 = of_translate_address(np, regaddr_p); of_node_put(np); mbar = ioremap((u32) regaddr64, 0xC000); if (!mbar) { printk(KERN_ERR "%s:%i Error mapping registers\n", __func__, __LINE__); return -ENOSYS; } cdm = mbar + 0x200; pic = mbar + 0x500; gps = mbar + 0xb00; gpw = mbar + 0xc00; pci = mbar + 0xd00; bes = mbar + 0x1200; xlb = mbar + 0x1f00; sram = mbar + 0x8000; return 0; } /* save and restore registers not bound to any real devices */ static struct mpc52xx_cdm scdm; static struct mpc52xx_intr spic; static struct mpc52xx_sdma sbes; static struct mpc52xx_xlb sxlb; static struct mpc52xx_gpio sgps; static struct mpc52xx_gpio_wkup sgpw; static char spci[0x200]; static void lite5200_save_regs(void) { _memcpy_fromio(&spic, pic, sizeof(*pic)); _memcpy_fromio(&sbes, bes, sizeof(*bes)); _memcpy_fromio(&scdm, cdm, sizeof(*cdm)); _memcpy_fromio(&sxlb, xlb, sizeof(*xlb)); _memcpy_fromio(&sgps, gps, sizeof(*gps)); _memcpy_fromio(&sgpw, gpw, sizeof(*gpw)); _memcpy_fromio(spci, pci, 0x200); _memcpy_fromio(saved_sram, sram, sram_size); } static void lite5200_restore_regs(void) { int i; _memcpy_toio(sram, saved_sram, sram_size); /* PCI Configuration */ _memcpy_toio(pci, spci, 0x200); /* * GPIOs. Interrupt Master Enable has higher address then other * registers, so just memcpy is ok. 
*/ _memcpy_toio(gpw, &sgpw, sizeof(*gpw)); _memcpy_toio(gps, &sgps, sizeof(*gps)); /* XLB Arbitrer */ out_be32(&xlb->snoop_window, sxlb.snoop_window); out_be32(&xlb->master_priority, sxlb.master_priority); out_be32(&xlb->master_pri_enable, sxlb.master_pri_enable); /* enable */ out_be32(&xlb->int_enable, sxlb.int_enable); out_be32(&xlb->config, sxlb.config); /* CDM - Clock Distribution Module */ out_8(&cdm->ipb_clk_sel, scdm.ipb_clk_sel); out_8(&cdm->pci_clk_sel, scdm.pci_clk_sel); out_8(&cdm->ext_48mhz_en, scdm.ext_48mhz_en); out_8(&cdm->fd_enable, scdm.fd_enable); out_be16(&cdm->fd_counters, scdm.fd_counters); out_be32(&cdm->clk_enables, scdm.clk_enables); out_8(&cdm->osc_disable, scdm.osc_disable); out_be16(&cdm->mclken_div_psc1, scdm.mclken_div_psc1); out_be16(&cdm->mclken_div_psc2, scdm.mclken_div_psc2); out_be16(&cdm->mclken_div_psc3, scdm.mclken_div_psc3); out_be16(&cdm->mclken_div_psc6, scdm.mclken_div_psc6); /* BESTCOMM */ out_be32(&bes->taskBar, sbes.taskBar); out_be32(&bes->currentPointer, sbes.currentPointer); out_be32(&bes->endPointer, sbes.endPointer); out_be32(&bes->variablePointer, sbes.variablePointer); out_8(&bes->IntVect1, sbes.IntVect1); out_8(&bes->IntVect2, sbes.IntVect2); out_be16(&bes->PtdCntrl, sbes.PtdCntrl); for (i=0; i<32; i++) out_8(&bes->ipr[i], sbes.ipr[i]); out_be32(&bes->cReqSelect, sbes.cReqSelect); out_be32(&bes->task_size0, sbes.task_size0); out_be32(&bes->task_size1, sbes.task_size1); out_be32(&bes->MDEDebug, sbes.MDEDebug); out_be32(&bes->ADSDebug, sbes.ADSDebug); out_be32(&bes->Value1, sbes.Value1); out_be32(&bes->Value2, sbes.Value2); out_be32(&bes->Control, sbes.Control); out_be32(&bes->Status, sbes.Status); out_be32(&bes->PTDDebug, sbes.PTDDebug); /* restore tasks */ for (i=0; i<16; i++) out_be16(&bes->tcr[i], sbes.tcr[i]); /* enable interrupts */ out_be32(&bes->IntPend, sbes.IntPend); out_be32(&bes->IntMask, sbes.IntMask); /* PIC */ out_be32(&pic->per_pri1, spic.per_pri1); out_be32(&pic->per_pri2, spic.per_pri2); 
out_be32(&pic->per_pri3, spic.per_pri3); out_be32(&pic->main_pri1, spic.main_pri1); out_be32(&pic->main_pri2, spic.main_pri2); out_be32(&pic->enc_status, spic.enc_status); /* unmask and enable interrupts */ out_be32(&pic->per_mask, spic.per_mask); out_be32(&pic->main_mask, spic.main_mask); out_be32(&pic->ctrl, spic.ctrl); } static int lite5200_pm_enter(suspend_state_t state) { /* deep sleep? let mpc52xx code handle that */ if (state == PM_SUSPEND_STANDBY) { return mpc52xx_pm_enter(state); } lite5200_save_regs(); /* effectively save FP regs */ enable_kernel_fp(); lite5200_low_power(sram, mbar); lite5200_restore_regs(); iounmap(mbar); return 0; } static void lite5200_pm_finish(void) { /* deep sleep? let mpc52xx code handle that */ if (lite5200_pm_target_state == PM_SUSPEND_STANDBY) mpc52xx_pm_finish(); } static void lite5200_pm_end(void) { lite5200_pm_target_state = PM_SUSPEND_ON; } static const struct platform_suspend_ops lite5200_pm_ops = { .valid = lite5200_pm_valid, .begin = lite5200_pm_begin, .prepare = lite5200_pm_prepare, .enter = lite5200_pm_enter, .finish = lite5200_pm_finish, .end = lite5200_pm_end, }; int __init lite5200_pm_init(void) { suspend_set_ops(&lite5200_pm_ops); return 0; }
gpl-2.0
rpdroky/ArchiKernel
arch/um/drivers/slip_common.c
9818
1128
#include <string.h> #include "slip_common.h" #include "net_user.h" int slip_proto_read(int fd, void *buf, int len, struct slip_proto *slip) { int i, n, size, start; if(slip->more > 0){ i = 0; while(i < slip->more){ size = slip_unesc(slip->ibuf[i++], slip->ibuf, &slip->pos, &slip->esc); if(size){ memcpy(buf, slip->ibuf, size); memmove(slip->ibuf, &slip->ibuf[i], slip->more - i); slip->more = slip->more - i; return size; } } slip->more = 0; } n = net_read(fd, &slip->ibuf[slip->pos], sizeof(slip->ibuf) - slip->pos); if(n <= 0) return n; start = slip->pos; for(i = 0; i < n; i++){ size = slip_unesc(slip->ibuf[start + i], slip->ibuf,&slip->pos, &slip->esc); if(size){ memcpy(buf, slip->ibuf, size); memmove(slip->ibuf, &slip->ibuf[start+i+1], n - (i + 1)); slip->more = n - (i + 1); return size; } } return 0; } int slip_proto_write(int fd, void *buf, int len, struct slip_proto *slip) { int actual, n; actual = slip_esc(buf, slip->obuf, len); n = net_write(fd, slip->obuf, actual); if(n < 0) return n; else return len; }
gpl-2.0
sattarvoybek/android_kernel_zte_p839f30
drivers/cpufreq/mperf.c
11610
1495
#include <linux/kernel.h> #include <linux/smp.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/slab.h> #include "mperf.h" static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); /* Called via smp_call_function_single(), on the target CPU */ static void read_measured_perf_ctrs(void *_cur) { struct aperfmperf *am = _cur; get_aperfmperf(am); } /* * Return the measured active (C0) frequency on this CPU since last call * to this function. * Input: cpu number * Return: Average CPU frequency in terms of max frequency (zero on error) * * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance * over a period of time, while CPU is in C0 state. * IA32_MPERF counts at the rate of max advertised frequency * IA32_APERF counts at the rate of actual CPU frequency * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and * no meaning should be associated with absolute values of these MSRs. */ unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy, unsigned int cpu) { struct aperfmperf perf; unsigned long ratio; unsigned int retval; if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) return 0; ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf); per_cpu(acfreq_old_perf, cpu) = perf; retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; return retval; } EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf); MODULE_LICENSE("GPL");
gpl-2.0
cneira/ebpf-backports
linux-3.10.0-514.21.1.el7.x86_64/drivers/s390/cio/itcw.c
12890
11950
/* * Functions for incremental construction of fcx enabled I/O control blocks. * * Copyright IBM Corp. 2008 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/module.h> #include <asm/fcx.h> #include <asm/itcw.h> /** * struct itcw - incremental tcw helper data type * * This structure serves as a handle for the incremental construction of a * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate * tcw and associated data. The data structures are contained inside a single * contiguous buffer provided by the user. * * The itcw construction functions take care of overall data integrity: * - reset unused fields to zero * - fill in required pointers * - ensure required alignment for data structures * - prevent data structures to cross 4k-byte boundary where required * - calculate tccb-related length fields * - optionally provide ready-made interrogate tcw and associated structures * * Restrictions apply to the itcws created with these construction functions: * - tida only supported for data address, not for tccb * - only contiguous tidaw-lists (no ttic) * - total number of bytes required per itcw may not exceed 4k bytes * - either read or write operation (may not work with r=0 and w=0) * * Example: * struct itcw *itcw; * void *buffer; * size_t size; * * size = itcw_calc_size(1, 2, 0); * buffer = kmalloc(size, GFP_KERNEL | GFP_DMA); * if (!buffer) * return -ENOMEM; * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0); * if (IS_ERR(itcw)) * return PTR_ER(itcw); * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72); * itcw_add_tidaw(itcw, 0, 0x30000, 20); * itcw_add_tidaw(itcw, 0, 0x40000, 52); * itcw_finalize(itcw); * */ struct itcw { struct tcw *tcw; struct tcw *intrg_tcw; int num_tidaws; int max_tidaws; int intrg_num_tidaws; int intrg_max_tidaws; }; /** * itcw_get_tcw - return pointer to tcw associated 
with the itcw * @itcw: address of the itcw * * Return pointer to the tcw associated with the itcw. */ struct tcw *itcw_get_tcw(struct itcw *itcw) { return itcw->tcw; } EXPORT_SYMBOL(itcw_get_tcw); /** * itcw_calc_size - return the size of an itcw with the given parameters * @intrg: if non-zero, add an interrogate tcw * @max_tidaws: maximum number of tidaws to be used for data addressing or zero * if no tida is to be used. * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing * by the interrogate tcw, if specified * * Calculate and return the number of bytes required to hold an itcw with the * given parameters and assuming tccbs with maximum size. * * Note that the resulting size also contains bytes needed for alignment * padding as well as padding to ensure that data structures don't cross a * 4k-boundary where required. */ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) { size_t len; int cross_count; /* Main data. */ len = sizeof(struct itcw); len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + /* TSB */ sizeof(struct tsb) + /* TIDAL */ max_tidaws * sizeof(struct tidaw); /* Interrogate data. */ if (intrg) { len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + /* TSB */ sizeof(struct tsb) + /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); } /* Maximum required alignment padding. */ len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; /* TIDAW lists may not cross a 4k boundary. To cross a * boundary we need to add a TTIC TIDAW. We need to reserve * one additional TIDAW for a TTIC that we may need to add due * to the placement of the data chunk in memory, and a further * TIDAW for each page boundary that the TIDAW list may cross * due to it's own size. 
*/ if (max_tidaws) { cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1) >> PAGE_SHIFT); len += cross_count * sizeof(struct tidaw); } if (intrg_max_tidaws) { cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1) >> PAGE_SHIFT); len += cross_count * sizeof(struct tidaw); } return len; } EXPORT_SYMBOL(itcw_calc_size); #define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095)) static inline void *fit_chunk(addr_t *start, addr_t end, size_t len, int align, int check_4k) { addr_t addr; addr = ALIGN(*start, align); if (check_4k && CROSS4K(addr, len)) { addr = ALIGN(addr, 4096); addr = ALIGN(addr, align); } if (addr + len > end) return ERR_PTR(-ENOSPC); *start = addr + len; return (void *) addr; } /** * itcw_init - initialize incremental tcw data structure * @buffer: address of buffer to use for data structures * @size: number of bytes in buffer * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write * operation tcw * @intrg: if non-zero, add and initialize an interrogate tcw * @max_tidaws: maximum number of tidaws to be used for data addressing or zero * if no tida is to be used. * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing * by the interrogate tcw, if specified * * Prepare the specified buffer to be used as an incremental tcw, i.e. a * helper data structure that can be used to construct a valid tcw by * successive calls to other helper functions. Note: the buffer needs to be * located below the 2G address limit. The resulting tcw has the following * restrictions: * - no tccb tidal * - input/output tidal is contiguous (no ttic) * - total data should not exceed 4k * - tcw specifies either read or write operation * * On success, return pointer to the resulting incremental tcw data structure, * ERR_PTR otherwise. 
*/ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, int max_tidaws, int intrg_max_tidaws) { struct itcw *itcw; void *chunk; addr_t start; addr_t end; int cross_count; /* Check for 2G limit. */ start = (addr_t) buffer; end = start + size; if (end > (1 << 31)) return ERR_PTR(-EINVAL); memset(buffer, 0, size); /* ITCW. */ chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); if (IS_ERR(chunk)) return chunk; itcw = chunk; /* allow for TTIC tidaws that may be needed to cross a page boundary */ cross_count = 0; if (max_tidaws) cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1) >> PAGE_SHIFT); itcw->max_tidaws = max_tidaws + cross_count; cross_count = 0; if (intrg_max_tidaws) cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1) >> PAGE_SHIFT); itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count; /* Main TCW. */ chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); if (IS_ERR(chunk)) return chunk; itcw->tcw = chunk; tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0, (op == ITCW_OP_WRITE) ? 1 : 0); /* Interrogate TCW. */ if (intrg) { chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); if (IS_ERR(chunk)) return chunk; itcw->intrg_tcw = chunk; tcw_init(itcw->intrg_tcw, 1, 0); tcw_set_intrg(itcw->tcw, itcw->intrg_tcw); } /* Data TIDAL. */ if (max_tidaws > 0) { chunk = fit_chunk(&start, end, sizeof(struct tidaw) * itcw->max_tidaws, 16, 0); if (IS_ERR(chunk)) return chunk; tcw_set_data(itcw->tcw, chunk, 1); } /* Interrogate data TIDAL. */ if (intrg && (intrg_max_tidaws > 0)) { chunk = fit_chunk(&start, end, sizeof(struct tidaw) * itcw->intrg_max_tidaws, 16, 0); if (IS_ERR(chunk)) return chunk; tcw_set_data(itcw->intrg_tcw, chunk, 1); } /* TSB. */ chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); if (IS_ERR(chunk)) return chunk; tsb_init(chunk); tcw_set_tsb(itcw->tcw, chunk); /* Interrogate TSB. 
*/ if (intrg) { chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); if (IS_ERR(chunk)) return chunk; tsb_init(chunk); tcw_set_tsb(itcw->intrg_tcw, chunk); } /* TCCB. */ chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); if (IS_ERR(chunk)) return chunk; tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT); tcw_set_tccb(itcw->tcw, chunk); /* Interrogate TCCB. */ if (intrg) { chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); if (IS_ERR(chunk)) return chunk; tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG); tcw_set_tccb(itcw->intrg_tcw, chunk); tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL, sizeof(struct dcw_intrg_data), 0); tcw_finalize(itcw->intrg_tcw, 0); } return itcw; } EXPORT_SYMBOL(itcw_init); /** * itcw_add_dcw - add a dcw to the itcw * @itcw: address of the itcw * @cmd: the dcw command * @flags: flags for the dcw * @cd: address of control data for this dcw or NULL if none is required * @cd_count: number of control data bytes for this dcw * @count: number of data bytes for this dcw * * Add a new dcw to the specified itcw by writing the dcw information specified * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw * would exceed the available space. * * Note: the tcal field of the tccb header will be updated to reflect added * content. */ struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd, u8 cd_count, u32 count) { return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd, flags, cd, cd_count, count); } EXPORT_SYMBOL(itcw_add_dcw); /** * itcw_add_tidaw - add a tidaw to the itcw * @itcw: address of the itcw * @flags: flags for the new tidaw * @addr: address value for the new tidaw * @count: count value for the new tidaw * * Add a new tidaw to the input/output data tidaw-list of the specified itcw * (depending on the value of the r-flag and w-flag). 
Return a pointer to * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the * available space. * * Note: TTIC tidaws are automatically added when needed, so explicitly calling * this interface with the TTIC flag is not supported. The last-tidaw flag * for the last tidaw in the list will be set by itcw_finalize. */ struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) { struct tidaw *following; if (itcw->num_tidaws >= itcw->max_tidaws) return ERR_PTR(-ENOSPC); /* * Is the tidaw, which follows the one we are about to fill, on the next * page? Then we have to insert a TTIC tidaw first, that points to the * tidaw on the new page. */ following = ((struct tidaw *) tcw_get_data(itcw->tcw)) + itcw->num_tidaws + 1; if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) { tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, TIDAW_FLAGS_TTIC, following, 0); if (itcw->num_tidaws >= itcw->max_tidaws) return ERR_PTR(-ENOSPC); } return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); } EXPORT_SYMBOL(itcw_add_tidaw); /** * itcw_set_data - set data address and tida flag of the itcw * @itcw: address of the itcw * @addr: the data address * @use_tidal: zero of the data address specifies a contiguous block of data, * non-zero if it specifies a list if tidaws. * * Set the input/output data address of the itcw (depending on the value of the * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag * is set as well. */ void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal) { tcw_set_data(itcw->tcw, addr, use_tidal); } EXPORT_SYMBOL(itcw_set_data); /** * itcw_finalize - calculate length and count fields of the itcw * @itcw: address of the itcw * * Calculate tcw input-/output-count and tccbl fields and add a tcat the tccb. * In case input- or output-tida is used, the tidaw-list must be stored in * continuous storage (no ttic). The tcal field in the tccb must be * up-to-date. 
*/ void itcw_finalize(struct itcw *itcw) { tcw_finalize(itcw->tcw, itcw->num_tidaws); } EXPORT_SYMBOL(itcw_finalize);
gpl-2.0
GustavoRD78/78Kernel-ZL-new-construction-283
arch/mips/sibyte/common/cfe_console.c
13658
1737
#include <linux/init.h> #include <linux/errno.h> #include <linux/console.h> #include <asm/sibyte/board.h> #include <asm/fw/cfe/cfe_api.h> #include <asm/fw/cfe/cfe_error.h> extern int cfe_cons_handle; static void cfe_console_write(struct console *cons, const char *str, unsigned int count) { int i, last, written; for (i=0, last=0; i<count; i++) { if (!str[i]) /* XXXKW can/should this ever happen? */ return; if (str[i] == '\n') { do { written = cfe_write(cfe_cons_handle, &str[last], i-last); if (written < 0) ; last += written; } while (last < i); while (cfe_write(cfe_cons_handle, "\r", 1) <= 0) ; } } if (last != count) { do { written = cfe_write(cfe_cons_handle, &str[last], count-last); if (written < 0) ; last += written; } while (last < count); } } static int cfe_console_setup(struct console *cons, char *str) { char consdev[32]; /* XXXKW think about interaction with 'console=' cmdline arg */ /* If none of the console options are configured, the build will break. */ if (cfe_getenv("BOOT_CONSOLE", consdev, 32) >= 0) { #ifdef CONFIG_SERIAL_SB1250_DUART if (!strcmp(consdev, "uart0")) { setleds("u0cn"); } else if (!strcmp(consdev, "uart1")) { setleds("u1cn"); } else #endif #ifdef CONFIG_VGA_CONSOLE if (!strcmp(consdev, "pcconsole0")) { setleds("pccn"); } else #endif return -ENODEV; } return 0; } static struct console sb1250_cfe_cons = { .name = "cfe", .write = cfe_console_write, .setup = cfe_console_setup, .flags = CON_PRINTBUFFER, .index = -1, }; static int __init sb1250_cfe_console_init(void) { register_console(&sb1250_cfe_cons); return 0; } console_initcall(sb1250_cfe_console_init);
gpl-2.0
ronenil/net-next
fs/fuse/dev.c
91
51754
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. */ #include "fuse_i.h" #include <linux/init.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/uio.h> #include <linux/miscdevice.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/pipe_fs_i.h> #include <linux/swap.h> #include <linux/splice.h> #include <linux/aio.h> MODULE_ALIAS_MISCDEV(FUSE_MINOR); MODULE_ALIAS("devname:fuse"); static struct kmem_cache *fuse_req_cachep; static struct fuse_conn *fuse_get_conn(struct file *file) { /* * Lockless access is OK, because file->private data is set * once during mount and is valid until the file is released. */ return file->private_data; } static void fuse_request_init(struct fuse_req *req, struct page **pages, struct fuse_page_desc *page_descs, unsigned npages) { memset(req, 0, sizeof(*req)); memset(pages, 0, sizeof(*pages) * npages); memset(page_descs, 0, sizeof(*page_descs) * npages); INIT_LIST_HEAD(&req->list); INIT_LIST_HEAD(&req->intr_entry); init_waitqueue_head(&req->waitq); atomic_set(&req->count, 1); req->pages = pages; req->page_descs = page_descs; req->max_pages = npages; } static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags) { struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags); if (req) { struct page **pages; struct fuse_page_desc *page_descs; if (npages <= FUSE_REQ_INLINE_PAGES) { pages = req->inline_pages; page_descs = req->inline_page_descs; } else { pages = kmalloc(sizeof(struct page *) * npages, flags); page_descs = kmalloc(sizeof(struct fuse_page_desc) * npages, flags); } if (!pages || !page_descs) { kfree(pages); kfree(page_descs); kmem_cache_free(fuse_req_cachep, req); return NULL; } fuse_request_init(req, pages, page_descs, npages); } return req; } struct fuse_req *fuse_request_alloc(unsigned npages) { return 
__fuse_request_alloc(npages, GFP_KERNEL); } EXPORT_SYMBOL_GPL(fuse_request_alloc); struct fuse_req *fuse_request_alloc_nofs(unsigned npages) { return __fuse_request_alloc(npages, GFP_NOFS); } void fuse_request_free(struct fuse_req *req) { if (req->pages != req->inline_pages) { kfree(req->pages); kfree(req->page_descs); } kmem_cache_free(fuse_req_cachep, req); } static void block_sigs(sigset_t *oldset) { sigset_t mask; siginitsetinv(&mask, sigmask(SIGKILL)); sigprocmask(SIG_BLOCK, &mask, oldset); } static void restore_sigs(sigset_t *oldset) { sigprocmask(SIG_SETMASK, oldset, NULL); } void __fuse_get_request(struct fuse_req *req) { atomic_inc(&req->count); } /* Must be called with > 1 refcount */ static void __fuse_put_request(struct fuse_req *req) { BUG_ON(atomic_read(&req->count) < 2); atomic_dec(&req->count); } static void fuse_req_init_context(struct fuse_req *req) { req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid()); req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid()); req->in.h.pid = current->pid; } void fuse_set_initialized(struct fuse_conn *fc) { /* Make sure stores before this are seen on another CPU */ smp_wmb(); fc->initialized = 1; } static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) { return !fc->initialized || (for_background && fc->blocked); } static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, bool for_background) { struct fuse_req *req; int err; atomic_inc(&fc->num_waiting); if (fuse_block_alloc(fc, for_background)) { sigset_t oldset; int intr; block_sigs(&oldset); intr = wait_event_interruptible_exclusive(fc->blocked_waitq, !fuse_block_alloc(fc, for_background)); restore_sigs(&oldset); err = -EINTR; if (intr) goto out; } /* Matches smp_wmb() in fuse_set_initialized() */ smp_rmb(); err = -ENOTCONN; if (!fc->connected) goto out; req = fuse_request_alloc(npages); err = -ENOMEM; if (!req) { if (for_background) wake_up(&fc->blocked_waitq); goto out; } fuse_req_init_context(req); 
req->waiting = 1; req->background = for_background; return req; out: atomic_dec(&fc->num_waiting); return ERR_PTR(err); } struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages) { return __fuse_get_req(fc, npages, false); } EXPORT_SYMBOL_GPL(fuse_get_req); struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc, unsigned npages) { return __fuse_get_req(fc, npages, true); } EXPORT_SYMBOL_GPL(fuse_get_req_for_background); /* * Return request in fuse_file->reserved_req. However that may * currently be in use. If that is the case, wait for it to become * available. */ static struct fuse_req *get_reserved_req(struct fuse_conn *fc, struct file *file) { struct fuse_req *req = NULL; struct fuse_file *ff = file->private_data; do { wait_event(fc->reserved_req_waitq, ff->reserved_req); spin_lock(&fc->lock); if (ff->reserved_req) { req = ff->reserved_req; ff->reserved_req = NULL; req->stolen_file = get_file(file); } spin_unlock(&fc->lock); } while (!req); return req; } /* * Put stolen request back into fuse_file->reserved_req */ static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req) { struct file *file = req->stolen_file; struct fuse_file *ff = file->private_data; spin_lock(&fc->lock); fuse_request_init(req, req->pages, req->page_descs, req->max_pages); BUG_ON(ff->reserved_req); ff->reserved_req = req; wake_up_all(&fc->reserved_req_waitq); spin_unlock(&fc->lock); fput(file); } /* * Gets a requests for a file operation, always succeeds * * This is used for sending the FLUSH request, which must get to * userspace, due to POSIX locks which may need to be unlocked. * * If allocation fails due to OOM, use the reserved request in * fuse_file. * * This is very unlikely to deadlock accidentally, since the * filesystem should not have it's own file open. If deadlock is * intentional, it can still be broken by "aborting" the filesystem. 
*/ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc, struct file *file) { struct fuse_req *req; atomic_inc(&fc->num_waiting); wait_event(fc->blocked_waitq, fc->initialized); /* Matches smp_wmb() in fuse_set_initialized() */ smp_rmb(); req = fuse_request_alloc(0); if (!req) req = get_reserved_req(fc, file); fuse_req_init_context(req); req->waiting = 1; req->background = 0; return req; } void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) { if (atomic_dec_and_test(&req->count)) { if (unlikely(req->background)) { /* * We get here in the unlikely case that a background * request was allocated but not sent */ spin_lock(&fc->lock); if (!fc->blocked) wake_up(&fc->blocked_waitq); spin_unlock(&fc->lock); } if (req->waiting) atomic_dec(&fc->num_waiting); if (req->stolen_file) put_reserved_req(fc, req); else fuse_request_free(req); } } EXPORT_SYMBOL_GPL(fuse_put_request); static unsigned len_args(unsigned numargs, struct fuse_arg *args) { unsigned nbytes = 0; unsigned i; for (i = 0; i < numargs; i++) nbytes += args[i].size; return nbytes; } static u64 fuse_get_unique(struct fuse_conn *fc) { fc->reqctr++; /* zero is special */ if (fc->reqctr == 0) fc->reqctr = 1; return fc->reqctr; } static void queue_request(struct fuse_conn *fc, struct fuse_req *req) { req->in.h.len = sizeof(struct fuse_in_header) + len_args(req->in.numargs, (struct fuse_arg *) req->in.args); list_add_tail(&req->list, &fc->pending); req->state = FUSE_REQ_PENDING; if (!req->waiting) { req->waiting = 1; atomic_inc(&fc->num_waiting); } wake_up(&fc->waitq); kill_fasync(&fc->fasync, SIGIO, POLL_IN); } void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, u64 nodeid, u64 nlookup) { forget->forget_one.nodeid = nodeid; forget->forget_one.nlookup = nlookup; spin_lock(&fc->lock); if (fc->connected) { fc->forget_list_tail->next = forget; fc->forget_list_tail = forget; wake_up(&fc->waitq); kill_fasync(&fc->fasync, SIGIO, POLL_IN); } else { kfree(forget); } 
spin_unlock(&fc->lock); } static void flush_bg_queue(struct fuse_conn *fc) { while (fc->active_background < fc->max_background && !list_empty(&fc->bg_queue)) { struct fuse_req *req; req = list_entry(fc->bg_queue.next, struct fuse_req, list); list_del(&req->list); fc->active_background++; req->in.h.unique = fuse_get_unique(fc); queue_request(fc, req); } } /* * This function is called when a request is finished. Either a reply * has arrived or it was aborted (and not yet sent) or some error * occurred during communication with userspace, or the device file * was closed. The requester thread is woken up (if still waiting), * the 'end' callback is called if given, else the reference to the * request is released * * Called with fc->lock, unlocks it */ static void request_end(struct fuse_conn *fc, struct fuse_req *req) __releases(fc->lock) { void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; req->end = NULL; list_del(&req->list); list_del(&req->intr_entry); req->state = FUSE_REQ_FINISHED; if (req->background) { req->background = 0; if (fc->num_background == fc->max_background) fc->blocked = 0; /* Wake up next waiter, if any */ if (!fc->blocked && waitqueue_active(&fc->blocked_waitq)) wake_up(&fc->blocked_waitq); if (fc->num_background == fc->congestion_threshold && fc->connected && fc->bdi_initialized) { clear_bdi_congested(&fc->bdi, BLK_RW_SYNC); clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC); } fc->num_background--; fc->active_background--; flush_bg_queue(fc); } spin_unlock(&fc->lock); wake_up(&req->waitq); if (end) end(fc, req); fuse_put_request(fc, req); } static void wait_answer_interruptible(struct fuse_conn *fc, struct fuse_req *req) __releases(fc->lock) __acquires(fc->lock) { if (signal_pending(current)) return; spin_unlock(&fc->lock); wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED); spin_lock(&fc->lock); } static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) { list_add_tail(&req->intr_entry, &fc->interrupts); 
wake_up(&fc->waitq); kill_fasync(&fc->fasync, SIGIO, POLL_IN); } static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) __releases(fc->lock) __acquires(fc->lock) { if (!fc->no_interrupt) { /* Any signal may interrupt this */ wait_answer_interruptible(fc, req); if (req->aborted) goto aborted; if (req->state == FUSE_REQ_FINISHED) return; req->interrupted = 1; if (req->state == FUSE_REQ_SENT) queue_interrupt(fc, req); } if (!req->force) { sigset_t oldset; /* Only fatal signals may interrupt this */ block_sigs(&oldset); wait_answer_interruptible(fc, req); restore_sigs(&oldset); if (req->aborted) goto aborted; if (req->state == FUSE_REQ_FINISHED) return; /* Request is not yet in userspace, bail out */ if (req->state == FUSE_REQ_PENDING) { list_del(&req->list); __fuse_put_request(req); req->out.h.error = -EINTR; return; } } /* * Either request is already in userspace, or it was forced. * Wait it out. */ spin_unlock(&fc->lock); wait_event(req->waitq, req->state == FUSE_REQ_FINISHED); spin_lock(&fc->lock); if (!req->aborted) return; aborted: BUG_ON(req->state != FUSE_REQ_FINISHED); if (req->locked) { /* This is uninterruptible sleep, because data is being copied to/from the buffers of req. During locked state, there mustn't be any filesystem operation (e.g. 
page fault), since that could lead to deadlock */ spin_unlock(&fc->lock); wait_event(req->waitq, !req->locked); spin_lock(&fc->lock); } } static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) { BUG_ON(req->background); spin_lock(&fc->lock); if (!fc->connected) req->out.h.error = -ENOTCONN; else if (fc->conn_error) req->out.h.error = -ECONNREFUSED; else { req->in.h.unique = fuse_get_unique(fc); queue_request(fc, req); /* acquire extra reference, since request is still needed after request_end() */ __fuse_get_request(req); request_wait_answer(fc, req); } spin_unlock(&fc->lock); } void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) { req->isreply = 1; __fuse_request_send(fc, req); } EXPORT_SYMBOL_GPL(fuse_request_send); static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) { if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS) args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE; if (fc->minor < 9) { switch (args->in.h.opcode) { case FUSE_LOOKUP: case FUSE_CREATE: case FUSE_MKNOD: case FUSE_MKDIR: case FUSE_SYMLINK: case FUSE_LINK: args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; break; case FUSE_GETATTR: case FUSE_SETATTR: args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; break; } } if (fc->minor < 12) { switch (args->in.h.opcode) { case FUSE_CREATE: args->in.args[0].size = sizeof(struct fuse_open_in); break; case FUSE_MKNOD: args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE; break; } } } ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) { struct fuse_req *req; ssize_t ret; req = fuse_get_req(fc, 0); if (IS_ERR(req)) return PTR_ERR(req); /* Needs to be done after fuse_get_req() so that fc->minor is valid */ fuse_adjust_compat(fc, args); req->in.h.opcode = args->in.h.opcode; req->in.h.nodeid = args->in.h.nodeid; req->in.numargs = args->in.numargs; memcpy(req->in.args, args->in.args, args->in.numargs * sizeof(struct fuse_in_arg)); req->out.argvar = args->out.argvar; req->out.numargs 
= args->out.numargs; memcpy(req->out.args, args->out.args, args->out.numargs * sizeof(struct fuse_arg)); fuse_request_send(fc, req); ret = req->out.h.error; if (!ret && args->out.argvar) { BUG_ON(args->out.numargs != 1); ret = req->out.args[0].size; } fuse_put_request(fc, req); return ret; } static void fuse_request_send_nowait_locked(struct fuse_conn *fc, struct fuse_req *req) { BUG_ON(!req->background); fc->num_background++; if (fc->num_background == fc->max_background) fc->blocked = 1; if (fc->num_background == fc->congestion_threshold && fc->bdi_initialized) { set_bdi_congested(&fc->bdi, BLK_RW_SYNC); set_bdi_congested(&fc->bdi, BLK_RW_ASYNC); } list_add_tail(&req->list, &fc->bg_queue); flush_bg_queue(fc); } static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) { spin_lock(&fc->lock); if (fc->connected) { fuse_request_send_nowait_locked(fc, req); spin_unlock(&fc->lock); } else { req->out.h.error = -ENOTCONN; request_end(fc, req); } } void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) { req->isreply = 1; fuse_request_send_nowait(fc, req); } EXPORT_SYMBOL_GPL(fuse_request_send_background); static int fuse_request_send_notify_reply(struct fuse_conn *fc, struct fuse_req *req, u64 unique) { int err = -ENODEV; req->isreply = 0; req->in.h.unique = unique; spin_lock(&fc->lock); if (fc->connected) { queue_request(fc, req); err = 0; } spin_unlock(&fc->lock); return err; } /* * Called under fc->lock * * fc->connected must have been checked previously */ void fuse_request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req) { req->isreply = 1; fuse_request_send_nowait_locked(fc, req); } void fuse_force_forget(struct file *file, u64 nodeid) { struct inode *inode = file_inode(file); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_req *req; struct fuse_forget_in inarg; memset(&inarg, 0, sizeof(inarg)); inarg.nlookup = 1; req = fuse_get_req_nofail_nopages(fc, file); req->in.h.opcode = FUSE_FORGET; 
req->in.h.nodeid = nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; req->isreply = 0; __fuse_request_send(fc, req); /* ignore errors */ fuse_put_request(fc, req); } /* * Lock the request. Up to the next unlock_request() there mustn't be * anything that could cause a page-fault. If the request was already * aborted bail out. */ static int lock_request(struct fuse_conn *fc, struct fuse_req *req) { int err = 0; if (req) { spin_lock(&fc->lock); if (req->aborted) err = -ENOENT; else req->locked = 1; spin_unlock(&fc->lock); } return err; } /* * Unlock request. If it was aborted during being locked, the * requester thread is currently waiting for it to be unlocked, so * wake it up. */ static void unlock_request(struct fuse_conn *fc, struct fuse_req *req) { if (req) { spin_lock(&fc->lock); req->locked = 0; if (req->aborted) wake_up(&req->waitq); spin_unlock(&fc->lock); } } struct fuse_copy_state { struct fuse_conn *fc; int write; struct fuse_req *req; const struct iovec *iov; struct pipe_buffer *pipebufs; struct pipe_buffer *currbuf; struct pipe_inode_info *pipe; unsigned long nr_segs; unsigned long seglen; unsigned long addr; struct page *pg; unsigned len; unsigned offset; unsigned move_pages:1; }; static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc, int write, const struct iovec *iov, unsigned long nr_segs) { memset(cs, 0, sizeof(*cs)); cs->fc = fc; cs->write = write; cs->iov = iov; cs->nr_segs = nr_segs; } /* Unmap and put previous page of userspace buffer */ static void fuse_copy_finish(struct fuse_copy_state *cs) { if (cs->currbuf) { struct pipe_buffer *buf = cs->currbuf; if (cs->write) buf->len = PAGE_SIZE - cs->len; cs->currbuf = NULL; } else if (cs->pg) { if (cs->write) { flush_dcache_page(cs->pg); set_page_dirty_lock(cs->pg); } put_page(cs->pg); } cs->pg = NULL; } /* * Get another pagefull of userspace buffer, and map it to kernel * address space, and lock request */ static int 
fuse_copy_fill(struct fuse_copy_state *cs) { struct page *page; int err; unlock_request(cs->fc, cs->req); fuse_copy_finish(cs); if (cs->pipebufs) { struct pipe_buffer *buf = cs->pipebufs; if (!cs->write) { err = buf->ops->confirm(cs->pipe, buf); if (err) return err; BUG_ON(!cs->nr_segs); cs->currbuf = buf; cs->pg = buf->page; cs->offset = buf->offset; cs->len = buf->len; cs->pipebufs++; cs->nr_segs--; } else { if (cs->nr_segs == cs->pipe->buffers) return -EIO; page = alloc_page(GFP_HIGHUSER); if (!page) return -ENOMEM; buf->page = page; buf->offset = 0; buf->len = 0; cs->currbuf = buf; cs->pg = page; cs->offset = 0; cs->len = PAGE_SIZE; cs->pipebufs++; cs->nr_segs++; } } else { if (!cs->seglen) { BUG_ON(!cs->nr_segs); cs->seglen = cs->iov[0].iov_len; cs->addr = (unsigned long) cs->iov[0].iov_base; cs->iov++; cs->nr_segs--; } err = get_user_pages_fast(cs->addr, 1, cs->write, &page); if (err < 0) return err; BUG_ON(err != 1); cs->pg = page; cs->offset = cs->addr % PAGE_SIZE; cs->len = min(PAGE_SIZE - cs->offset, cs->seglen); cs->seglen -= cs->len; cs->addr += cs->len; } return lock_request(cs->fc, cs->req); } /* Do as much copy to/from userspace buffer as we can */ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) { unsigned ncpy = min(*size, cs->len); if (val) { void *pgaddr = kmap_atomic(cs->pg); void *buf = pgaddr + cs->offset; if (cs->write) memcpy(buf, *val, ncpy); else memcpy(*val, buf, ncpy); kunmap_atomic(pgaddr); *val += ncpy; } *size -= ncpy; cs->len -= ncpy; cs->offset += ncpy; return ncpy; } static int fuse_check_page(struct page *page) { if (page_mapcount(page) || page->mapping != NULL || page_count(page) != 1 || (page->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced | 1 << PG_uptodate | 1 << PG_lru | 1 << PG_active | 1 << PG_reclaim))) { printk(KERN_WARNING "fuse: trying to steal weird page\n"); printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, 
page->index, page->flags, page_count(page), page_mapcount(page), page->mapping); return 1; } return 0; } static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) { int err; struct page *oldpage = *pagep; struct page *newpage; struct pipe_buffer *buf = cs->pipebufs; unlock_request(cs->fc, cs->req); fuse_copy_finish(cs); err = buf->ops->confirm(cs->pipe, buf); if (err) return err; BUG_ON(!cs->nr_segs); cs->currbuf = buf; cs->len = buf->len; cs->pipebufs++; cs->nr_segs--; if (cs->len != PAGE_SIZE) goto out_fallback; if (buf->ops->steal(cs->pipe, buf) != 0) goto out_fallback; newpage = buf->page; if (WARN_ON(!PageUptodate(newpage))) return -EIO; ClearPageMappedToDisk(newpage); if (fuse_check_page(newpage) != 0) goto out_fallback_unlock; /* * This is a new and locked page, it shouldn't be mapped or * have any special flags on it */ if (WARN_ON(page_mapped(oldpage))) goto out_fallback_unlock; if (WARN_ON(page_has_private(oldpage))) goto out_fallback_unlock; if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage))) goto out_fallback_unlock; if (WARN_ON(PageMlocked(oldpage))) goto out_fallback_unlock; err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL); if (err) { unlock_page(newpage); return err; } page_cache_get(newpage); if (!(buf->flags & PIPE_BUF_FLAG_LRU)) lru_cache_add_file(newpage); err = 0; spin_lock(&cs->fc->lock); if (cs->req->aborted) err = -ENOENT; else *pagep = newpage; spin_unlock(&cs->fc->lock); if (err) { unlock_page(newpage); page_cache_release(newpage); return err; } unlock_page(oldpage); page_cache_release(oldpage); cs->len = 0; return 0; out_fallback_unlock: unlock_page(newpage); out_fallback: cs->pg = buf->page; cs->offset = buf->offset; err = lock_request(cs->fc, cs->req); if (err) return err; return 1; } static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, unsigned offset, unsigned count) { struct pipe_buffer *buf; if (cs->nr_segs == cs->pipe->buffers) return -EIO; unlock_request(cs->fc, cs->req); 
fuse_copy_finish(cs); buf = cs->pipebufs; page_cache_get(page); buf->page = page; buf->offset = offset; buf->len = count; cs->pipebufs++; cs->nr_segs++; cs->len = 0; return 0; } /* * Copy a page in the request to/from the userspace buffer. Must be * done atomically */ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, unsigned offset, unsigned count, int zeroing) { int err; struct page *page = *pagep; if (page && zeroing && count < PAGE_SIZE) clear_highpage(page); while (count) { if (cs->write && cs->pipebufs && page) { return fuse_ref_page(cs, page, offset, count); } else if (!cs->len) { if (cs->move_pages && page && offset == 0 && count == PAGE_SIZE) { err = fuse_try_move_page(cs, pagep); if (err <= 0) return err; } else { err = fuse_copy_fill(cs); if (err) return err; } } if (page) { void *mapaddr = kmap_atomic(page); void *buf = mapaddr + offset; offset += fuse_copy_do(cs, &buf, &count); kunmap_atomic(mapaddr); } else offset += fuse_copy_do(cs, NULL, &count); } if (page && !cs->write) flush_dcache_page(page); return 0; } /* Copy pages in the request to/from userspace buffer */ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, int zeroing) { unsigned i; struct fuse_req *req = cs->req; for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) { int err; unsigned offset = req->page_descs[i].offset; unsigned count = min(nbytes, req->page_descs[i].length); err = fuse_copy_page(cs, &req->pages[i], offset, count, zeroing); if (err) return err; nbytes -= count; } return 0; } /* Copy a single argument in the request to/from userspace buffer */ static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size) { while (size) { if (!cs->len) { int err = fuse_copy_fill(cs); if (err) return err; } fuse_copy_do(cs, &val, &size); } return 0; } /* Copy request arguments to/from userspace buffer */ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, unsigned argpages, struct fuse_arg *args, int 
zeroing) { int err = 0; unsigned i; for (i = 0; !err && i < numargs; i++) { struct fuse_arg *arg = &args[i]; if (i == numargs - 1 && argpages) err = fuse_copy_pages(cs, arg->size, zeroing); else err = fuse_copy_one(cs, arg->value, arg->size); } return err; } static int forget_pending(struct fuse_conn *fc) { return fc->forget_list_head.next != NULL; } static int request_pending(struct fuse_conn *fc) { return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) || forget_pending(fc); } /* Wait until a request is available on the pending list */ static void request_wait(struct fuse_conn *fc) __releases(fc->lock) __acquires(fc->lock) { DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(&fc->waitq, &wait); while (fc->connected && !request_pending(fc)) { set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) break; spin_unlock(&fc->lock); schedule(); spin_lock(&fc->lock); } set_current_state(TASK_RUNNING); remove_wait_queue(&fc->waitq, &wait); } /* * Transfer an interrupt request to userspace * * Unlike other requests this is assembled on demand, without a need * to allocate a separate fuse_req structure. * * Called with fc->lock held, releases it */ static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, size_t nbytes, struct fuse_req *req) __releases(fc->lock) { struct fuse_in_header ih; struct fuse_interrupt_in arg; unsigned reqsize = sizeof(ih) + sizeof(arg); int err; list_del_init(&req->intr_entry); req->intr_unique = fuse_get_unique(fc); memset(&ih, 0, sizeof(ih)); memset(&arg, 0, sizeof(arg)); ih.len = reqsize; ih.opcode = FUSE_INTERRUPT; ih.unique = req->intr_unique; arg.unique = req->in.h.unique; spin_unlock(&fc->lock); if (nbytes < reqsize) return -EINVAL; err = fuse_copy_one(cs, &ih, sizeof(ih)); if (!err) err = fuse_copy_one(cs, &arg, sizeof(arg)); fuse_copy_finish(cs); return err ? 
err : reqsize; } static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc, unsigned max, unsigned *countp) { struct fuse_forget_link *head = fc->forget_list_head.next; struct fuse_forget_link **newhead = &head; unsigned count; for (count = 0; *newhead != NULL && count < max; count++) newhead = &(*newhead)->next; fc->forget_list_head.next = *newhead; *newhead = NULL; if (fc->forget_list_head.next == NULL) fc->forget_list_tail = &fc->forget_list_head; if (countp != NULL) *countp = count; return head; } static int fuse_read_single_forget(struct fuse_conn *fc, struct fuse_copy_state *cs, size_t nbytes) __releases(fc->lock) { int err; struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL); struct fuse_forget_in arg = { .nlookup = forget->forget_one.nlookup, }; struct fuse_in_header ih = { .opcode = FUSE_FORGET, .nodeid = forget->forget_one.nodeid, .unique = fuse_get_unique(fc), .len = sizeof(ih) + sizeof(arg), }; spin_unlock(&fc->lock); kfree(forget); if (nbytes < ih.len) return -EINVAL; err = fuse_copy_one(cs, &ih, sizeof(ih)); if (!err) err = fuse_copy_one(cs, &arg, sizeof(arg)); fuse_copy_finish(cs); if (err) return err; return ih.len; } static int fuse_read_batch_forget(struct fuse_conn *fc, struct fuse_copy_state *cs, size_t nbytes) __releases(fc->lock) { int err; unsigned max_forgets; unsigned count; struct fuse_forget_link *head; struct fuse_batch_forget_in arg = { .count = 0 }; struct fuse_in_header ih = { .opcode = FUSE_BATCH_FORGET, .unique = fuse_get_unique(fc), .len = sizeof(ih) + sizeof(arg), }; if (nbytes < ih.len) { spin_unlock(&fc->lock); return -EINVAL; } max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one); head = dequeue_forget(fc, max_forgets, &count); spin_unlock(&fc->lock); arg.count = count; ih.len += count * sizeof(struct fuse_forget_one); err = fuse_copy_one(cs, &ih, sizeof(ih)); if (!err) err = fuse_copy_one(cs, &arg, sizeof(arg)); while (head) { struct fuse_forget_link *forget = head; if (!err) { err = 
fuse_copy_one(cs, &forget->forget_one, sizeof(forget->forget_one)); } head = forget->next; kfree(forget); } fuse_copy_finish(cs); if (err) return err; return ih.len; } static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs, size_t nbytes) __releases(fc->lock) { if (fc->minor < 16 || fc->forget_list_head.next->next == NULL) return fuse_read_single_forget(fc, cs, nbytes); else return fuse_read_batch_forget(fc, cs, nbytes); } /* * Read a single request into the userspace filesystem's buffer. This * function waits until a request is available, then removes it from * the pending list and copies request data to userspace buffer. If * no reply is needed (FORGET) or request has been aborted or there * was an error during the copying then it's finished by calling * request_end(). Otherwise add it to the processing list, and set * the 'sent' flag. */ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, struct fuse_copy_state *cs, size_t nbytes) { int err; struct fuse_req *req; struct fuse_in *in; unsigned reqsize; restart: spin_lock(&fc->lock); err = -EAGAIN; if ((file->f_flags & O_NONBLOCK) && fc->connected && !request_pending(fc)) goto err_unlock; request_wait(fc); err = -ENODEV; if (!fc->connected) goto err_unlock; err = -ERESTARTSYS; if (!request_pending(fc)) goto err_unlock; if (!list_empty(&fc->interrupts)) { req = list_entry(fc->interrupts.next, struct fuse_req, intr_entry); return fuse_read_interrupt(fc, cs, nbytes, req); } if (forget_pending(fc)) { if (list_empty(&fc->pending) || fc->forget_batch-- > 0) return fuse_read_forget(fc, cs, nbytes); if (fc->forget_batch <= -8) fc->forget_batch = 16; } req = list_entry(fc->pending.next, struct fuse_req, list); req->state = FUSE_REQ_READING; list_move(&req->list, &fc->io); in = &req->in; reqsize = in->h.len; /* If request is too large, reply with an error and restart the read */ if (nbytes < reqsize) { req->out.h.error = -EIO; /* SETXATTR is special, since it may contain too large 
data */ if (in->h.opcode == FUSE_SETXATTR) req->out.h.error = -E2BIG; request_end(fc, req); goto restart; } spin_unlock(&fc->lock); cs->req = req; err = fuse_copy_one(cs, &in->h, sizeof(in->h)); if (!err) err = fuse_copy_args(cs, in->numargs, in->argpages, (struct fuse_arg *) in->args, 0); fuse_copy_finish(cs); spin_lock(&fc->lock); req->locked = 0; if (req->aborted) { request_end(fc, req); return -ENODEV; } if (err) { req->out.h.error = -EIO; request_end(fc, req); return err; } if (!req->isreply) request_end(fc, req); else { req->state = FUSE_REQ_SENT; list_move_tail(&req->list, &fc->processing); if (req->interrupted) queue_interrupt(fc, req); spin_unlock(&fc->lock); } return reqsize; err_unlock: spin_unlock(&fc->lock); return err; } static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct fuse_copy_state cs; struct file *file = iocb->ki_filp; struct fuse_conn *fc = fuse_get_conn(file); if (!fc) return -EPERM; fuse_copy_init(&cs, fc, 1, iov, nr_segs); return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs)); } static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { int ret; int page_nr = 0; int do_wakeup = 0; struct pipe_buffer *bufs; struct fuse_copy_state cs; struct fuse_conn *fc = fuse_get_conn(in); if (!fc) return -EPERM; bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); if (!bufs) return -ENOMEM; fuse_copy_init(&cs, fc, 1, NULL, 0); cs.pipebufs = bufs; cs.pipe = pipe; ret = fuse_dev_do_read(fc, in, &cs, len); if (ret < 0) goto out; ret = 0; pipe_lock(pipe); if (!pipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; goto out_unlock; } if (pipe->nrbufs + cs.nr_segs > pipe->buffers) { ret = -EIO; goto out_unlock; } while (page_nr < cs.nr_segs) { int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); struct pipe_buffer *buf = pipe->bufs + newbuf; buf->page = 
bufs[page_nr].page; buf->offset = bufs[page_nr].offset; buf->len = bufs[page_nr].len; /* * Need to be careful about this. Having buf->ops in module * code can Oops if the buffer persists after module unload. */ buf->ops = &nosteal_pipe_buf_ops; pipe->nrbufs++; page_nr++; ret += buf->len; if (pipe->files) do_wakeup = 1; } out_unlock: pipe_unlock(pipe); if (do_wakeup) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } out: for (; page_nr < cs.nr_segs; page_nr++) page_cache_release(bufs[page_nr].page); kfree(bufs); return ret; } static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_poll_wakeup_out outarg; int err = -EINVAL; if (size != sizeof(outarg)) goto err; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto err; fuse_copy_finish(cs); return fuse_notify_poll_wakeup(fc, &outarg); err: fuse_copy_finish(cs); return err; } static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_inval_inode_out outarg; int err = -EINVAL; if (size != sizeof(outarg)) goto err; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto err; fuse_copy_finish(cs); down_read(&fc->killsb); err = -ENOENT; if (fc->sb) { err = fuse_reverse_inval_inode(fc->sb, outarg.ino, outarg.off, outarg.len); } up_read(&fc->killsb); return err; err: fuse_copy_finish(cs); return err; } static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_inval_entry_out outarg; int err = -ENOMEM; char *buf; struct qstr name; buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL); if (!buf) goto err; err = -EINVAL; if (size < sizeof(outarg)) goto err; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto err; err = -ENAMETOOLONG; if (outarg.namelen > FUSE_NAME_MAX) goto err; err = -EINVAL; if (size != sizeof(outarg) + 
outarg.namelen + 1) goto err; name.name = buf; name.len = outarg.namelen; err = fuse_copy_one(cs, buf, outarg.namelen + 1); if (err) goto err; fuse_copy_finish(cs); buf[outarg.namelen] = 0; name.hash = full_name_hash(name.name, name.len); down_read(&fc->killsb); err = -ENOENT; if (fc->sb) err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name); up_read(&fc->killsb); kfree(buf); return err; err: kfree(buf); fuse_copy_finish(cs); return err; } static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_delete_out outarg; int err = -ENOMEM; char *buf; struct qstr name; buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL); if (!buf) goto err; err = -EINVAL; if (size < sizeof(outarg)) goto err; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto err; err = -ENAMETOOLONG; if (outarg.namelen > FUSE_NAME_MAX) goto err; err = -EINVAL; if (size != sizeof(outarg) + outarg.namelen + 1) goto err; name.name = buf; name.len = outarg.namelen; err = fuse_copy_one(cs, buf, outarg.namelen + 1); if (err) goto err; fuse_copy_finish(cs); buf[outarg.namelen] = 0; name.hash = full_name_hash(name.name, name.len); down_read(&fc->killsb); err = -ENOENT; if (fc->sb) err = fuse_reverse_inval_entry(fc->sb, outarg.parent, outarg.child, &name); up_read(&fc->killsb); kfree(buf); return err; err: kfree(buf); fuse_copy_finish(cs); return err; } static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_store_out outarg; struct inode *inode; struct address_space *mapping; u64 nodeid; int err; pgoff_t index; unsigned int offset; unsigned int num; loff_t file_size; loff_t end; err = -EINVAL; if (size < sizeof(outarg)) goto out_finish; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto out_finish; err = -EINVAL; if (size - sizeof(outarg) != outarg.size) goto out_finish; nodeid = outarg.nodeid; down_read(&fc->killsb); err = -ENOENT; if (!fc->sb) goto 
out_up_killsb; inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); if (!inode) goto out_up_killsb; mapping = inode->i_mapping; index = outarg.offset >> PAGE_CACHE_SHIFT; offset = outarg.offset & ~PAGE_CACHE_MASK; file_size = i_size_read(inode); end = outarg.offset + outarg.size; if (end > file_size) { file_size = end; fuse_write_update_size(inode, file_size); } num = outarg.size; while (num) { struct page *page; unsigned int this_num; err = -ENOMEM; page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); if (!page) goto out_iput; this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); err = fuse_copy_page(cs, &page, offset, this_num, 0); if (!err && offset == 0 && (this_num == PAGE_CACHE_SIZE || file_size == end)) SetPageUptodate(page); unlock_page(page); page_cache_release(page); if (err) goto out_iput; num -= this_num; offset = 0; index++; } err = 0; out_iput: iput(inode); out_up_killsb: up_read(&fc->killsb); out_finish: fuse_copy_finish(cs); return err; } static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req) { release_pages(req->pages, req->num_pages, false); } static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, struct fuse_notify_retrieve_out *outarg) { int err; struct address_space *mapping = inode->i_mapping; struct fuse_req *req; pgoff_t index; loff_t file_size; unsigned int num; unsigned int offset; size_t total_len = 0; int num_pages; offset = outarg->offset & ~PAGE_CACHE_MASK; file_size = i_size_read(inode); num = outarg->size; if (outarg->offset > file_size) num = 0; else if (outarg->offset + num > file_size) num = file_size - outarg->offset; num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ); req = fuse_get_req(fc, num_pages); if (IS_ERR(req)) return PTR_ERR(req); req->in.h.opcode = FUSE_NOTIFY_REPLY; req->in.h.nodeid = outarg->nodeid; req->in.numargs = 2; req->in.argpages = 1; req->page_descs[0].offset = offset; req->end = 
fuse_retrieve_end; index = outarg->offset >> PAGE_CACHE_SHIFT; while (num && req->num_pages < num_pages) { struct page *page; unsigned int this_num; page = find_get_page(mapping, index); if (!page) break; this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); req->pages[req->num_pages] = page; req->page_descs[req->num_pages].length = this_num; req->num_pages++; offset = 0; num -= this_num; total_len += this_num; index++; } req->misc.retrieve_in.offset = outarg->offset; req->misc.retrieve_in.size = total_len; req->in.args[0].size = sizeof(req->misc.retrieve_in); req->in.args[0].value = &req->misc.retrieve_in; req->in.args[1].size = total_len; err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique); if (err) fuse_retrieve_end(fc, req); return err; } static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { struct fuse_notify_retrieve_out outarg; struct inode *inode; int err; err = -EINVAL; if (size != sizeof(outarg)) goto copy_finish; err = fuse_copy_one(cs, &outarg, sizeof(outarg)); if (err) goto copy_finish; fuse_copy_finish(cs); down_read(&fc->killsb); err = -ENOENT; if (fc->sb) { u64 nodeid = outarg.nodeid; inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid); if (inode) { err = fuse_retrieve(fc, inode, &outarg); iput(inode); } } up_read(&fc->killsb); return err; copy_finish: fuse_copy_finish(cs); return err; } static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, unsigned int size, struct fuse_copy_state *cs) { switch (code) { case FUSE_NOTIFY_POLL: return fuse_notify_poll(fc, size, cs); case FUSE_NOTIFY_INVAL_INODE: return fuse_notify_inval_inode(fc, size, cs); case FUSE_NOTIFY_INVAL_ENTRY: return fuse_notify_inval_entry(fc, size, cs); case FUSE_NOTIFY_STORE: return fuse_notify_store(fc, size, cs); case FUSE_NOTIFY_RETRIEVE: return fuse_notify_retrieve(fc, size, cs); case FUSE_NOTIFY_DELETE: return fuse_notify_delete(fc, size, cs); default: fuse_copy_finish(cs); return -EINVAL; 
} } /* Look up request on processing list by unique ID */ static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique) { struct fuse_req *req; list_for_each_entry(req, &fc->processing, list) { if (req->in.h.unique == unique || req->intr_unique == unique) return req; } return NULL; } static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out, unsigned nbytes) { unsigned reqsize = sizeof(struct fuse_out_header); if (out->h.error) return nbytes != reqsize ? -EINVAL : 0; reqsize += len_args(out->numargs, out->args); if (reqsize < nbytes || (reqsize > nbytes && !out->argvar)) return -EINVAL; else if (reqsize > nbytes) { struct fuse_arg *lastarg = &out->args[out->numargs-1]; unsigned diffsize = reqsize - nbytes; if (diffsize > lastarg->size) return -EINVAL; lastarg->size -= diffsize; } return fuse_copy_args(cs, out->numargs, out->argpages, out->args, out->page_zeroing); } /* * Write a single reply to a request. First the header is copied from * the write buffer. The request is then searched on the processing * list by the unique ID found in the header. If found, then remove * it from the list and copy the rest of the buffer to the request. * The request is finished by calling request_end() */ static ssize_t fuse_dev_do_write(struct fuse_conn *fc, struct fuse_copy_state *cs, size_t nbytes) { int err; struct fuse_req *req; struct fuse_out_header oh; if (nbytes < sizeof(struct fuse_out_header)) return -EINVAL; err = fuse_copy_one(cs, &oh, sizeof(oh)); if (err) goto err_finish; err = -EINVAL; if (oh.len != nbytes) goto err_finish; /* * Zero oh.unique indicates unsolicited notification message * and error contains notification code. */ if (!oh.unique) { err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); return err ? 
err : nbytes; } err = -EINVAL; if (oh.error <= -1000 || oh.error > 0) goto err_finish; spin_lock(&fc->lock); err = -ENOENT; if (!fc->connected) goto err_unlock; req = request_find(fc, oh.unique); if (!req) goto err_unlock; if (req->aborted) { spin_unlock(&fc->lock); fuse_copy_finish(cs); spin_lock(&fc->lock); request_end(fc, req); return -ENOENT; } /* Is it an interrupt reply? */ if (req->intr_unique == oh.unique) { err = -EINVAL; if (nbytes != sizeof(struct fuse_out_header)) goto err_unlock; if (oh.error == -ENOSYS) fc->no_interrupt = 1; else if (oh.error == -EAGAIN) queue_interrupt(fc, req); spin_unlock(&fc->lock); fuse_copy_finish(cs); return nbytes; } req->state = FUSE_REQ_WRITING; list_move(&req->list, &fc->io); req->out.h = oh; req->locked = 1; cs->req = req; if (!req->out.page_replace) cs->move_pages = 0; spin_unlock(&fc->lock); err = copy_out_args(cs, &req->out, nbytes); fuse_copy_finish(cs); spin_lock(&fc->lock); req->locked = 0; if (!err) { if (req->aborted) err = -ENOENT; } else if (!req->aborted) req->out.h.error = -EIO; request_end(fc, req); return err ? 
err : nbytes; err_unlock: spin_unlock(&fc->lock); err_finish: fuse_copy_finish(cs); return err; } static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct fuse_copy_state cs; struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); if (!fc) return -EPERM; fuse_copy_init(&cs, fc, 0, iov, nr_segs); return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs)); } static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { unsigned nbuf; unsigned idx; struct pipe_buffer *bufs; struct fuse_copy_state cs; struct fuse_conn *fc; size_t rem; ssize_t ret; fc = fuse_get_conn(out); if (!fc) return -EPERM; bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); if (!bufs) return -ENOMEM; pipe_lock(pipe); nbuf = 0; rem = 0; for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len; ret = -EINVAL; if (rem < len) { pipe_unlock(pipe); goto out; } rem = len; while (rem) { struct pipe_buffer *ibuf; struct pipe_buffer *obuf; BUG_ON(nbuf >= pipe->buffers); BUG_ON(!pipe->nrbufs); ibuf = &pipe->bufs[pipe->curbuf]; obuf = &bufs[nbuf]; if (rem >= ibuf->len) { *obuf = *ibuf; ibuf->ops = NULL; pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); pipe->nrbufs--; } else { ibuf->ops->get(pipe, ibuf); *obuf = *ibuf; obuf->flags &= ~PIPE_BUF_FLAG_GIFT; obuf->len = rem; ibuf->offset += obuf->len; ibuf->len -= obuf->len; } nbuf++; rem -= obuf->len; } pipe_unlock(pipe); fuse_copy_init(&cs, fc, 0, NULL, nbuf); cs.pipebufs = bufs; cs.pipe = pipe; if (flags & SPLICE_F_MOVE) cs.move_pages = 1; ret = fuse_dev_do_write(fc, &cs, len); for (idx = 0; idx < nbuf; idx++) { struct pipe_buffer *buf = &bufs[idx]; buf->ops->release(pipe, buf); } out: kfree(bufs); return ret; } static unsigned fuse_dev_poll(struct file *file, poll_table *wait) { unsigned mask = POLLOUT | POLLWRNORM; struct fuse_conn *fc = 
fuse_get_conn(file); if (!fc) return POLLERR; poll_wait(file, &fc->waitq, wait); spin_lock(&fc->lock); if (!fc->connected) mask = POLLERR; else if (request_pending(fc)) mask |= POLLIN | POLLRDNORM; spin_unlock(&fc->lock); return mask; } /* * Abort all requests on the given list (pending or processing) * * This function releases and reacquires fc->lock */ static void end_requests(struct fuse_conn *fc, struct list_head *head) __releases(fc->lock) __acquires(fc->lock) { while (!list_empty(head)) { struct fuse_req *req; req = list_entry(head->next, struct fuse_req, list); req->out.h.error = -ECONNABORTED; request_end(fc, req); spin_lock(&fc->lock); } } /* * Abort requests under I/O * * The requests are set to aborted and finished, and the request * waiter is woken up. This will make request_wait_answer() wait * until the request is unlocked and then return. * * If the request is asynchronous, then the end function needs to be * called after waiting for the request to be unlocked (if it was * locked). 
*/ static void end_io_requests(struct fuse_conn *fc) __releases(fc->lock) __acquires(fc->lock) { while (!list_empty(&fc->io)) { struct fuse_req *req = list_entry(fc->io.next, struct fuse_req, list); void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; req->aborted = 1; req->out.h.error = -ECONNABORTED; req->state = FUSE_REQ_FINISHED; list_del_init(&req->list); wake_up(&req->waitq); if (end) { req->end = NULL; __fuse_get_request(req); spin_unlock(&fc->lock); wait_event(req->waitq, !req->locked); end(fc, req); fuse_put_request(fc, req); spin_lock(&fc->lock); } } } static void end_queued_requests(struct fuse_conn *fc) __releases(fc->lock) __acquires(fc->lock) { fc->max_background = UINT_MAX; flush_bg_queue(fc); end_requests(fc, &fc->pending); end_requests(fc, &fc->processing); while (forget_pending(fc)) kfree(dequeue_forget(fc, 1, NULL)); } static void end_polls(struct fuse_conn *fc) { struct rb_node *p; p = rb_first(&fc->polled_files); while (p) { struct fuse_file *ff; ff = rb_entry(p, struct fuse_file, polled_node); wake_up_interruptible_all(&ff->poll_wait); p = rb_next(p); } } /* * Abort all requests. * * Emergency exit in case of a malicious or accidental deadlock, or * just a hung filesystem. * * The same effect is usually achievable through killing the * filesystem daemon and all users of the filesystem. The exception * is the combination of an asynchronous request and the tricky * deadlock (see Documentation/filesystems/fuse.txt). * * During the aborting, progression of requests from the pending and * processing lists onto the io list, and progression of new requests * onto the pending list is prevented by req->connected being false. * * Progression of requests under I/O to the processing list is * prevented by the req->aborted flag being true for these requests. * For this reason requests on the io list must be aborted first. 
*/ void fuse_abort_conn(struct fuse_conn *fc) { spin_lock(&fc->lock); if (fc->connected) { fc->connected = 0; fc->blocked = 0; fuse_set_initialized(fc); end_io_requests(fc); end_queued_requests(fc); end_polls(fc); wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); kill_fasync(&fc->fasync, SIGIO, POLL_IN); } spin_unlock(&fc->lock); } EXPORT_SYMBOL_GPL(fuse_abort_conn); int fuse_dev_release(struct inode *inode, struct file *file) { struct fuse_conn *fc = fuse_get_conn(file); if (fc) { spin_lock(&fc->lock); fc->connected = 0; fc->blocked = 0; fuse_set_initialized(fc); end_queued_requests(fc); end_polls(fc); wake_up_all(&fc->blocked_waitq); spin_unlock(&fc->lock); fuse_conn_put(fc); } return 0; } EXPORT_SYMBOL_GPL(fuse_dev_release); static int fuse_dev_fasync(int fd, struct file *file, int on) { struct fuse_conn *fc = fuse_get_conn(file); if (!fc) return -EPERM; /* No locking - fasync_helper does its own locking */ return fasync_helper(fd, file, on, &fc->fasync); } const struct file_operations fuse_dev_operations = { .owner = THIS_MODULE, .llseek = no_llseek, .read = do_sync_read, .aio_read = fuse_dev_read, .splice_read = fuse_dev_splice_read, .write = do_sync_write, .aio_write = fuse_dev_write, .splice_write = fuse_dev_splice_write, .poll = fuse_dev_poll, .release = fuse_dev_release, .fasync = fuse_dev_fasync, }; EXPORT_SYMBOL_GPL(fuse_dev_operations); static struct miscdevice fuse_miscdevice = { .minor = FUSE_MINOR, .name = "fuse", .fops = &fuse_dev_operations, }; int __init fuse_dev_init(void) { int err = -ENOMEM; fuse_req_cachep = kmem_cache_create("fuse_request", sizeof(struct fuse_req), 0, 0, NULL); if (!fuse_req_cachep) goto out; err = misc_register(&fuse_miscdevice); if (err) goto out_cache_clean; return 0; out_cache_clean: kmem_cache_destroy(fuse_req_cachep); out: return err; } void fuse_dev_cleanup(void) { misc_deregister(&fuse_miscdevice); kmem_cache_destroy(fuse_req_cachep); }
gpl-2.0
jtoppins/net-next
drivers/pci/host/pcie-rcar.c
91
25967
/* * PCIe driver for Renesas R-Car SoCs * Copyright (C) 2014 Renesas Electronics Europe Ltd * * Based on: * arch/sh/drivers/pci/pcie-sh7786.c * arch/sh/drivers/pci/ops-sh7786.c * Copyright (C) 2009 - 2011 Paul Mundt * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #define DRV_NAME "rcar-pcie" #define PCIECAR 0x000010 #define PCIECCTLR 0x000018 #define CONFIG_SEND_ENABLE (1 << 31) #define TYPE0 (0 << 8) #define TYPE1 (1 << 8) #define PCIECDR 0x000020 #define PCIEMSR 0x000028 #define PCIEINTXR 0x000400 #define PCIEMSITXR 0x000840 /* Transfer control */ #define PCIETCTLR 0x02000 #define CFINIT 1 #define PCIETSTR 0x02004 #define DATA_LINK_ACTIVE 1 #define PCIEERRFR 0x02020 #define UNSUPPORTED_REQUEST (1 << 4) #define PCIEMSIFR 0x02044 #define PCIEMSIALR 0x02048 #define MSIFE 1 #define PCIEMSIAUR 0x0204c #define PCIEMSIIER 0x02050 /* root port address */ #define PCIEPRAR(x) (0x02080 + ((x) * 0x4)) /* local address reg & mask */ #define PCIELAR(x) (0x02200 + ((x) * 0x20)) #define PCIELAMR(x) (0x02208 + ((x) * 0x20)) #define LAM_PREFETCH (1 << 3) #define LAM_64BIT (1 << 2) #define LAR_ENABLE (1 << 1) /* PCIe address reg & mask */ #define PCIEPALR(x) (0x03400 + ((x) * 0x20)) #define PCIEPAUR(x) (0x03404 + ((x) * 0x20)) #define PCIEPAMR(x) (0x03408 + ((x) * 0x20)) #define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20)) #define PAR_ENABLE (1 << 31) #define IO_SPACE (1 << 8) /* Configuration */ #define PCICONF(x) (0x010000 + ((x) * 0x4)) #define PMCAP(x) 
(0x010040 + ((x) * 0x4)) #define EXPCAP(x) (0x010070 + ((x) * 0x4)) #define VCCAP(x) (0x010100 + ((x) * 0x4)) /* link layer */ #define IDSETR1 0x011004 #define TLCTLR 0x011048 #define MACSR 0x011054 #define MACCTLR 0x011058 #define SCRAMBLE_DISABLE (1 << 27) /* R-Car H1 PHY */ #define H1_PCIEPHYADRR 0x04000c #define WRITE_CMD (1 << 16) #define PHY_ACK (1 << 24) #define RATE_POS 12 #define LANE_POS 8 #define ADR_POS 0 #define H1_PCIEPHYDOUTR 0x040014 #define H1_PCIEPHYSR 0x040018 #define INT_PCI_MSI_NR 32 #define RCONF(x) (PCICONF(0)+(x)) #define RPMCAP(x) (PMCAP(0)+(x)) #define REXPCAP(x) (EXPCAP(0)+(x)) #define RVCCAP(x) (VCCAP(0)+(x)) #define PCIE_CONF_BUS(b) (((b) & 0xff) << 24) #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) #define RCAR_PCI_MAX_RESOURCES 4 #define MAX_NR_INBOUND_MAPS 6 static unsigned long global_io_offset; struct rcar_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; struct msi_controller chip; unsigned long pages; struct mutex lock; int irq1; int irq2; }; static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) { return container_of(chip, struct rcar_msi, chip); } /* Structure representing the PCIe interface */ /* * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI * sysdata. Add pci_sys_data as the first element in struct gen_pci so * that when we use a gen_pci pointer as sysdata, it is also a pointer to * a struct pci_sys_data. 
*/ struct rcar_pcie { #ifdef CONFIG_ARM struct pci_sys_data sys; #endif struct device *dev; void __iomem *base; struct resource res[RCAR_PCI_MAX_RESOURCES]; struct resource busn; int root_bus_nr; struct clk *clk; struct clk *bus_clk; struct rcar_msi msi; }; static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val, unsigned long reg) { writel(val, pcie->base + reg); } static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned long reg) { return readl(pcie->base + reg); } enum { RCAR_PCI_ACCESS_READ, RCAR_PCI_ACCESS_WRITE, }; static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) { int shift = 8 * (where & 3); u32 val = rcar_pci_read_reg(pcie, where & ~3); val &= ~(mask << shift); val |= data << shift; rcar_pci_write_reg(pcie, val, where & ~3); } static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) { int shift = 8 * (where & 3); u32 val = rcar_pci_read_reg(pcie, where & ~3); return val >> shift; } /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ static int rcar_pcie_config_access(struct rcar_pcie *pcie, unsigned char access_type, struct pci_bus *bus, unsigned int devfn, int where, u32 *data) { int dev, func, reg, index; dev = PCI_SLOT(devfn); func = PCI_FUNC(devfn); reg = where & ~3; index = reg / 4; /* * While each channel has its own memory-mapped extended config * space, it's generally only accessible when in endpoint mode. * When in root complex mode, the controller is unable to target * itself with either type 0 or type 1 accesses, and indeed, any * controller initiated target transfer to its own config space * result in a completer abort. * * Each channel effectively only supports a single device, but as * the same channel <-> device access works for any PCI_SLOT() * value, we cheat a bit here and bind the controller's config * space to devfn 0 in order to enable self-enumeration. 
In this * case the regular ECAR/ECDR path is sidelined and the mangled * config access itself is initiated as an internal bus transaction. */ if (pci_is_root_bus(bus)) { if (dev != 0) return PCIBIOS_DEVICE_NOT_FOUND; if (access_type == RCAR_PCI_ACCESS_READ) { *data = rcar_pci_read_reg(pcie, PCICONF(index)); } else { /* Keep an eye out for changes to the root bus number */ if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) pcie->root_bus_nr = *data & 0xff; rcar_pci_write_reg(pcie, *data, PCICONF(index)); } return PCIBIOS_SUCCESSFUL; } if (pcie->root_bus_nr < 0) return PCIBIOS_DEVICE_NOT_FOUND; /* Clear errors */ rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); /* Set the PIO address */ rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); /* Enable the configuration access */ if (bus->parent->number == pcie->root_bus_nr) rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); else rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); /* Check for errors */ if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) return PCIBIOS_DEVICE_NOT_FOUND; /* Check for master and target aborts */ if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) & (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) return PCIBIOS_DEVICE_NOT_FOUND; if (access_type == RCAR_PCI_ACCESS_READ) *data = rcar_pci_read_reg(pcie, PCIECDR); else rcar_pci_write_reg(pcie, *data, PCIECDR); /* Disable the configuration access */ rcar_pci_write_reg(pcie, 0, PCIECCTLR); return PCIBIOS_SUCCESSFUL; } static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct rcar_pcie *pcie = bus->sysdata; int ret; ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, bus, devfn, where, val); if (ret != PCIBIOS_SUCCESSFUL) { *val = 0xffffffff; return ret; } if (size == 1) *val = (*val >> (8 * (where & 3))) & 0xff; else if (size == 2) *val = (*val >> (8 * (where & 
2))) & 0xffff; dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", bus->number, devfn, where, size, (unsigned long)*val); return ret; } /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct rcar_pcie *pcie = bus->sysdata; int shift, ret; u32 data; ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, bus, devfn, where, &data); if (ret != PCIBIOS_SUCCESSFUL) return ret; dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", bus->number, devfn, where, size, (unsigned long)val); if (size == 1) { shift = 8 * (where & 3); data &= ~(0xff << shift); data |= ((val & 0xff) << shift); } else if (size == 2) { shift = 8 * (where & 2); data &= ~(0xffff << shift); data |= ((val & 0xffff) << shift); } else data = val; ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE, bus, devfn, where, &data); return ret; } static struct pci_ops rcar_pcie_ops = { .read = rcar_pcie_read_conf, .write = rcar_pcie_write_conf, }; static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie) { struct resource *res = &pcie->res[win]; /* Setup PCIe address space mappings for each resource */ resource_size_t size; resource_size_t res_start; u32 mask; rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); /* * The PAMR mask is calculated in units of 128Bytes, which * keeps things pretty simple. 
*/ size = resource_size(res); mask = (roundup_pow_of_two(size) / SZ_128) - 1; rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); if (res->flags & IORESOURCE_IO) res_start = pci_pio_to_address(res->start); else res_start = res->start; rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, PCIEPALR(win)); /* First resource is for IO */ mask = PAR_ENABLE; if (res->flags & IORESOURCE_IO) mask |= IO_SPACE; rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win)); } static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pcie) { struct resource *res; int i; pcie->root_bus_nr = pcie->busn.start; /* Setup PCI resources */ for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) { res = &pcie->res[i]; if (!res->flags) continue; rcar_pcie_setup_window(i, pcie); if (res->flags & IORESOURCE_IO) { phys_addr_t io_start = pci_pio_to_address(res->start); pci_ioremap_io(global_io_offset, io_start); global_io_offset += SZ_64K; } pci_add_resource(resource, res); } pci_add_resource(resource, &pcie->busn); return 1; } static int rcar_pcie_enable(struct rcar_pcie *pcie) { struct pci_bus *bus, *child; LIST_HEAD(res); rcar_pcie_setup(&res, pcie); /* Do not reassign resources if probe only */ if (!pci_has_flag(PCI_PROBE_ONLY)) pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); if (IS_ENABLED(CONFIG_PCI_MSI)) bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr, &rcar_pcie_ops, pcie, &res, &pcie->msi.chip); else bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr, &rcar_pcie_ops, pcie, &res); if (!bus) { dev_err(pcie->dev, "Scanning rootbus failed"); return -ENODEV; } pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); if (!pci_has_flag(PCI_PROBE_ONLY)) { pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); } pci_bus_add_devices(bus); return 0; } static int phy_wait_for_ack(struct rcar_pcie *pcie) { unsigned 
int timeout = 100; while (timeout--) { if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) return 0; udelay(100); } dev_err(pcie->dev, "Access to PCIe phy timed out\n"); return -ETIMEDOUT; } static void phy_write_reg(struct rcar_pcie *pcie, unsigned int rate, unsigned int addr, unsigned int lane, unsigned int data) { unsigned long phyaddr; phyaddr = WRITE_CMD | ((rate & 1) << RATE_POS) | ((lane & 0xf) << LANE_POS) | ((addr & 0xff) << ADR_POS); /* Set write data */ rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); /* Ignore errors as they will be dealt with if the data link is down */ phy_wait_for_ack(pcie); /* Clear command */ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR); /* Ignore errors as they will be dealt with if the data link is down */ phy_wait_for_ack(pcie); } static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) { unsigned int timeout = 10; while (timeout--) { if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) return 0; msleep(5); } return -ETIMEDOUT; } static int rcar_pcie_hw_init(struct rcar_pcie *pcie) { int err; /* Begin initialization */ rcar_pci_write_reg(pcie, 0, PCIETCTLR); /* Set mode */ rcar_pci_write_reg(pcie, 1, PCIEMSR); /* * Initial header for port config space is type 1, set the device * class to match. Hardware takes care of propagating the IDSETR * settings, so there is no need to bother with a quirk. */ rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); /* * Setup Secondary Bus Number & Subordinate Bus Number, even though * they aren't used, to avoid bridge being detected as broken. */ rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1); rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); /* Initialize default capabilities. 
*/ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, PCI_HEADER_TYPE_BRIDGE); /* Enable data link layer active state reporting */ rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC, PCI_EXP_LNKCAP_DLLLARC); /* Write out the physical slot number = 0 */ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); /* Set the completion timer timeout to the maximum 50ms. */ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50); /* Terminate list of capabilities (Next Capability Offset=0) */ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0); /* Enable MSI */ if (IS_ENABLED(CONFIG_PCI_MSI)) rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR); /* Finish initialization - establish a PCI Express link */ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); /* This will timeout if we don't have a link. */ err = rcar_pcie_wait_for_dl(pcie); if (err) return err; /* Enable INTx interrupts */ rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); wmb(); return 0; } static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie) { unsigned int timeout = 10; /* Initialize the phy */ phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188); phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188); phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014); phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014); phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0); phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB); phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062); phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000); phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000); phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806); phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5); phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); while (timeout--) { if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR)) return rcar_pcie_hw_init(pcie); msleep(5); 
} return -ETIMEDOUT; } static int rcar_msi_alloc(struct rcar_msi *chip) { int msi; mutex_lock(&chip->lock); msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); if (msi < INT_PCI_MSI_NR) set_bit(msi, chip->used); else msi = -ENOSPC; mutex_unlock(&chip->lock); return msi; } static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) { mutex_lock(&chip->lock); clear_bit(irq, chip->used); mutex_unlock(&chip->lock); } static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) { struct rcar_pcie *pcie = data; struct rcar_msi *msi = &pcie->msi; unsigned long reg; reg = rcar_pci_read_reg(pcie, PCIEMSIFR); /* MSI & INTx share an interrupt - we only handle MSI here */ if (!reg) return IRQ_NONE; while (reg) { unsigned int index = find_first_bit(&reg, 32); unsigned int irq; /* clear the interrupt */ rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); irq = irq_find_mapping(msi->domain, index); if (irq) { if (test_bit(index, msi->used)) generic_handle_irq(irq); else dev_info(pcie->dev, "unhandled MSI\n"); } else { /* Unknown MSI, just clear it */ dev_dbg(pcie->dev, "unexpected MSI\n"); } /* see if there's any more pending in this vector */ reg = rcar_pci_read_reg(pcie, PCIEMSIFR); } return IRQ_HANDLED; } static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, struct msi_desc *desc) { struct rcar_msi *msi = to_rcar_msi(chip); struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); struct msi_msg msg; unsigned int irq; int hwirq; hwirq = rcar_msi_alloc(msi); if (hwirq < 0) return hwirq; irq = irq_create_mapping(msi->domain, hwirq); if (!irq) { rcar_msi_free(msi, hwirq); return -EINVAL; } irq_set_msi_desc(irq, desc); msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); msg.data = hwirq; pci_write_msi_msg(irq, &msg); return 0; } static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) { struct rcar_msi *msi = to_rcar_msi(chip); struct irq_data *d 
= irq_get_irq_data(irq); rcar_msi_free(msi, d->hwirq); } static struct irq_chip rcar_msi_irq_chip = { .name = "R-Car PCIe MSI", .irq_enable = pci_msi_unmask_irq, .irq_disable = pci_msi_mask_irq, .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, }; static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); irq_set_chip_data(irq, domain->host_data); return 0; } static const struct irq_domain_ops msi_domain_ops = { .map = rcar_msi_map, }; static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) { struct platform_device *pdev = to_platform_device(pcie->dev); struct rcar_msi *msi = &pcie->msi; unsigned long base; int err; mutex_init(&msi->lock); msi->chip.dev = pcie->dev; msi->chip.setup_irq = rcar_msi_setup_irq; msi->chip.teardown_irq = rcar_msi_teardown_irq; msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR, &msi_domain_ops, &msi->chip); if (!msi->domain) { dev_err(&pdev->dev, "failed to create IRQ domain\n"); return -ENOMEM; } /* Two irqs are for MSI, but they are also used for non-MSI irqs */ err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq, IRQF_SHARED, rcar_msi_irq_chip.name, pcie); if (err < 0) { dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); goto err; } err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq, IRQF_SHARED, rcar_msi_irq_chip.name, pcie); if (err < 0) { dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); goto err; } /* setup MSI data target */ msi->pages = __get_free_pages(GFP_KERNEL, 0); base = virt_to_phys((void *)msi->pages); rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); /* enable all MSI interrupts */ rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); return 0; err: irq_domain_remove(msi->domain); return err; } static int rcar_pcie_get_resources(struct platform_device *pdev, struct rcar_pcie *pcie) { struct resource res; int 
err, i; err = of_address_to_resource(pdev->dev.of_node, 0, &res); if (err) return err; pcie->clk = devm_clk_get(&pdev->dev, "pcie"); if (IS_ERR(pcie->clk)) { dev_err(pcie->dev, "cannot get platform clock\n"); return PTR_ERR(pcie->clk); } err = clk_prepare_enable(pcie->clk); if (err) goto fail_clk; pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); if (IS_ERR(pcie->bus_clk)) { dev_err(pcie->dev, "cannot get pcie bus clock\n"); err = PTR_ERR(pcie->bus_clk); goto fail_clk; } err = clk_prepare_enable(pcie->bus_clk); if (err) goto err_map_reg; i = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!i) { dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); err = -ENOENT; goto err_map_reg; } pcie->msi.irq1 = i; i = irq_of_parse_and_map(pdev->dev.of_node, 1); if (!i) { dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); err = -ENOENT; goto err_map_reg; } pcie->msi.irq2 = i; pcie->base = devm_ioremap_resource(&pdev->dev, &res); if (IS_ERR(pcie->base)) { err = PTR_ERR(pcie->base); goto err_map_reg; } return 0; err_map_reg: clk_disable_unprepare(pcie->bus_clk); fail_clk: clk_disable_unprepare(pcie->clk); return err; } static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, struct of_pci_range *range, int *index) { u64 restype = range->flags; u64 cpu_addr = range->cpu_addr; u64 cpu_end = range->cpu_addr + range->size; u64 pci_addr = range->pci_addr; u32 flags = LAM_64BIT | LAR_ENABLE; u64 mask; u64 size; int idx = *index; if (restype & IORESOURCE_PREFETCH) flags |= LAM_PREFETCH; /* * If the size of the range is larger than the alignment of the start * address, we have to use multiple entries to perform the mapping. 
*/ if (cpu_addr > 0) { unsigned long nr_zeros = __ffs64(cpu_addr); u64 alignment = 1ULL << nr_zeros; size = min(range->size, alignment); } else { size = range->size; } /* Hardware supports max 4GiB inbound region */ size = min(size, 1ULL << 32); mask = roundup_pow_of_two(size) - 1; mask &= ~0xf; while (cpu_addr < cpu_end) { /* * Set up 64-bit inbound regions as the range parser doesn't * distinguish between 32 and 64-bit types. */ rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx)); rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx)); rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1)); rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1)); rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1)); pci_addr += size; cpu_addr += size; idx += 2; if (idx > MAX_NR_INBOUND_MAPS) { dev_err(pcie->dev, "Failed to map inbound regions!\n"); return -EINVAL; } } *index = idx; return 0; } static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, struct device_node *node) { const int na = 3, ns = 2; int rlen; parser->node = node; parser->pna = of_n_addr_cells(node); parser->np = parser->pna + na + ns; parser->range = of_get_property(node, "dma-ranges", &rlen); if (!parser->range) return -ENOENT; parser->end = parser->range + rlen / sizeof(__be32); return 0; } static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, struct device_node *np) { struct of_pci_range range; struct of_pci_range_parser parser; int index = 0; int err; if (pci_dma_range_parser_init(&parser, np)) return -EINVAL; /* Get the dma-ranges from DT */ for_each_of_pci_range(&parser, &range) { u64 end = range.cpu_addr + range.size - 1; dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", range.flags, range.cpu_addr, end, range.pci_addr); err = rcar_pcie_inbound_ranges(pcie, &range, &index); if (err) return err; } return 0; } static const struct of_device_id 
rcar_pcie_of_match[] = { { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 }, { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init }, { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init }, {}, }; MODULE_DEVICE_TABLE(of, rcar_pcie_of_match); static int rcar_pcie_probe(struct platform_device *pdev) { struct rcar_pcie *pcie; unsigned int data; struct of_pci_range range; struct of_pci_range_parser parser; const struct of_device_id *of_id; int err, win = 0; int (*hw_init_fn)(struct rcar_pcie *); pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pcie->dev = &pdev->dev; platform_set_drvdata(pdev, pcie); /* Get the bus range */ if (of_pci_parse_bus_range(pdev->dev.of_node, &pcie->busn)) { dev_err(&pdev->dev, "failed to parse bus-range property\n"); return -EINVAL; } if (of_pci_range_parser_init(&parser, pdev->dev.of_node)) { dev_err(&pdev->dev, "missing ranges property\n"); return -EINVAL; } err = rcar_pcie_get_resources(pdev, pcie); if (err < 0) { dev_err(&pdev->dev, "failed to request resources: %d\n", err); return err; } for_each_of_pci_range(&parser, &range) { err = of_pci_range_to_resource(&range, pdev->dev.of_node, &pcie->res[win++]); if (err < 0) return err; if (win > RCAR_PCI_MAX_RESOURCES) break; } err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node); if (err) return err; if (IS_ENABLED(CONFIG_PCI_MSI)) { err = rcar_pcie_enable_msi(pcie); if (err < 0) { dev_err(&pdev->dev, "failed to enable MSI support: %d\n", err); return err; } } of_id = of_match_device(rcar_pcie_of_match, pcie->dev); if (!of_id || !of_id->data) return -EINVAL; hw_init_fn = of_id->data; /* Failure to get a link might just be that no cards are inserted */ err = hw_init_fn(pcie); if (err) { dev_info(&pdev->dev, "PCIe link down\n"); return 0; } data = rcar_pci_read_reg(pcie, MACSR); dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); return rcar_pcie_enable(pcie); } static struct 
platform_driver rcar_pcie_driver = { .driver = { .name = DRV_NAME, .of_match_table = rcar_pcie_of_match, .suppress_bind_attrs = true, }, .probe = rcar_pcie_probe, }; module_platform_driver(rcar_pcie_driver); MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>"); MODULE_DESCRIPTION("Renesas R-Car PCIe driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
ennarr/linux-kernel
arch/m68k/kernel/setup_no.c
91
9010
/* * linux/arch/m68knommu/kernel/setup.c * * Copyright (C) 1999-2007 Greg Ungerer (gerg@snapgear.com) * Copyright (C) 1998,1999 D. Jeff Dionne <jeff@uClinux.org> * Copyleft ()) 2000 James D. Schettine {james@telos-systems.com} * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com> * Copyright (C) 1995 Hamish Macdonald * Copyright (C) 2000 Lineo Inc. (www.lineo.com) * Copyright (C) 2001 Lineo, Inc. <www.lineo.com> * * 68VZ328 Fixes/support Evan Stawnyczy <e@lineo.ca> */ /* * This file handles the architecture-dependent parts of system setup */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/console.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/bootmem.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/initrd.h> #include <linux/root_dev.h> #include <asm/setup.h> #include <asm/irq.h> #include <asm/machdep.h> #include <asm/pgtable.h> #include <asm/sections.h> unsigned long memory_start; unsigned long memory_end; EXPORT_SYMBOL(memory_start); EXPORT_SYMBOL(memory_end); char __initdata command_line[COMMAND_LINE_SIZE]; /* machine dependent timer functions */ int (*mach_set_clock_mmss)(unsigned long); /* machine dependent reboot functions */ void (*mach_reset)(void); void (*mach_halt)(void); void (*mach_power_off)(void); #ifdef CONFIG_M68328 #define CPU_NAME "MC68328" #endif #ifdef CONFIG_M68EZ328 #define CPU_NAME "MC68EZ328" #endif #ifdef CONFIG_M68VZ328 #define CPU_NAME "MC68VZ328" #endif #ifdef CONFIG_M68360 #define CPU_NAME "MC68360" #endif #ifndef CPU_NAME #define CPU_NAME "UNKNOWN" #endif /* * Different cores have different instruction execution timings. * The old/traditional 68000 cores are basically all the same, at 16. * The ColdFire cores vary a little, their values are defined in their * headers. We default to the standard 68000 value here. 
*/ #ifndef CPU_INSTR_PER_JIFFY #define CPU_INSTR_PER_JIFFY 16 #endif #if defined(CONFIG_UBOOT) /* * parse_uboot_commandline * * Copies u-boot commandline arguments and store them in the proper linux * variables. * * Assumes: * _init_sp global contains the address in the stack pointer when the * kernel starts (see head.S::_start) * * U-Boot calling convention: * (*kernel) (kbd, initrd_start, initrd_end, cmd_start, cmd_end); * * _init_sp can be parsed as such * * _init_sp+00 = u-boot cmd after jsr into kernel (skip) * _init_sp+04 = &kernel board_info (residual data) * _init_sp+08 = &initrd_start * _init_sp+12 = &initrd_end * _init_sp+16 = &cmd_start * _init_sp+20 = &cmd_end * * This also assumes that the memory locations pointed to are still * unmodified. U-boot places them near the end of external SDRAM. * * Argument(s): * commandp = the linux commandline arg container to fill. * size = the sizeof commandp. * * Returns: */ void parse_uboot_commandline(char *commandp, int size) { extern unsigned long _init_sp; unsigned long *sp; unsigned long uboot_kbd; unsigned long uboot_initrd_start, uboot_initrd_end; unsigned long uboot_cmd_start, uboot_cmd_end; sp = (unsigned long *)_init_sp; uboot_kbd = sp[1]; uboot_initrd_start = sp[2]; uboot_initrd_end = sp[3]; uboot_cmd_start = sp[4]; uboot_cmd_end = sp[5]; if (uboot_cmd_start && uboot_cmd_end) strncpy(commandp, (const char *)uboot_cmd_start, size); #if defined(CONFIG_BLK_DEV_INITRD) if (uboot_initrd_start && uboot_initrd_end && (uboot_initrd_end > uboot_initrd_start)) { initrd_start = uboot_initrd_start; initrd_end = uboot_initrd_end; ROOT_DEV = Root_RAM0; printk(KERN_INFO "initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end); } #endif /* if defined(CONFIG_BLK_DEV_INITRD) */ } #endif /* #if defined(CONFIG_UBOOT) */ void __init setup_arch(char **cmdline_p) { int bootmap_size; memory_start = PAGE_ALIGN(_ramstart); memory_end = _ramend; init_mm.start_code = (unsigned long) &_stext; init_mm.end_code = (unsigned long) &_etext; 
init_mm.end_data = (unsigned long) &_edata; init_mm.brk = (unsigned long) 0; config_BSP(&command_line[0], sizeof(command_line)); #if defined(CONFIG_BOOTPARAM) strncpy(&command_line[0], CONFIG_BOOTPARAM_STRING, sizeof(command_line)); command_line[sizeof(command_line) - 1] = 0; #endif /* CONFIG_BOOTPARAM */ #if defined(CONFIG_UBOOT) /* CONFIG_UBOOT and CONFIG_BOOTPARAM defined, concatenate cmdline */ #if defined(CONFIG_BOOTPARAM) /* Add the whitespace separator */ command_line[strlen(CONFIG_BOOTPARAM_STRING)] = ' '; /* Parse uboot command line into the rest of the buffer */ parse_uboot_commandline( &command_line[(strlen(CONFIG_BOOTPARAM_STRING)+1)], (sizeof(command_line) - (strlen(CONFIG_BOOTPARAM_STRING)+1))); /* Only CONFIG_UBOOT defined, create cmdline */ #else parse_uboot_commandline(&command_line[0], sizeof(command_line)); #endif /* CONFIG_BOOTPARAM */ command_line[sizeof(command_line) - 1] = 0; #endif /* CONFIG_UBOOT */ printk(KERN_INFO "\x0F\r\n\nuClinux/" CPU_NAME "\n"); #ifdef CONFIG_UCDIMM printk(KERN_INFO "uCdimm by Lineo, Inc. <www.lineo.com>\n"); #endif #ifdef CONFIG_M68VZ328 printk(KERN_INFO "M68VZ328 support by Evan Stawnyczy <e@lineo.ca>\n"); #endif #ifdef CONFIG_COLDFIRE printk(KERN_INFO "COLDFIRE port done by Greg Ungerer, gerg@snapgear.com\n"); #ifdef CONFIG_M5307 printk(KERN_INFO "Modified for M5307 by Dave Miller, dmiller@intellistor.com\n"); #endif #ifdef CONFIG_ELITE printk(KERN_INFO "Modified for M5206eLITE by Rob Scott, rscott@mtrob.fdns.net\n"); #endif #endif printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n"); #if defined( CONFIG_PILOT ) && defined( CONFIG_M68328 ) printk(KERN_INFO "TRG SuperPilot FLASH card support <info@trgnet.com>\n"); #endif #if defined( CONFIG_PILOT ) && defined( CONFIG_M68EZ328 ) printk(KERN_INFO "PalmV support by Lineo Inc. 
<jeff@uclinux.com>\n"); #endif #if defined (CONFIG_M68360) printk(KERN_INFO "QUICC port done by SED Systems <hamilton@sedsystems.ca>,\n"); printk(KERN_INFO "based on 2.0.38 port by Lineo Inc. <mleslie@lineo.com>.\n"); #endif #ifdef CONFIG_DRAGEN2 printk(KERN_INFO "DragonEngine II board support by Georges Menie\n"); #endif #ifdef CONFIG_M5235EVB printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n"); #endif pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x " "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext, (int) &_sdata, (int) &_edata, (int) &_sbss, (int) &_ebss); pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ", (int) &_ebss, (int) memory_start, (int) memory_start, (int) memory_end); /* Keep a copy of command line */ *cmdline_p = &command_line[0]; memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); boot_command_line[COMMAND_LINE_SIZE-1] = 0; #if defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; #endif /* * Give all the memory to the bootmap allocator, tell it to put the * boot mem_map at the start of memory. */ bootmap_size = init_bootmem_node( NODE_DATA(0), memory_start >> PAGE_SHIFT, /* map goes here */ PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */ memory_end >> PAGE_SHIFT); /* * Free the usable memory, we have to make sure we do not free * the bootmem bitmap so we then reserve it after freeing it :-) */ free_bootmem(memory_start, memory_end - memory_start); reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT); #if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD) if ((initrd_start > 0) && (initrd_start < initrd_end) && (initrd_end < memory_end)) reserve_bootmem(initrd_start, initrd_end - initrd_start, BOOTMEM_DEFAULT); #endif /* if defined(CONFIG_BLK_DEV_INITRD) */ /* * Get kmalloc into gear. */ paging_init(); } /* * Get CPU information for use by the procfs. 
*/ static int show_cpuinfo(struct seq_file *m, void *v) { char *cpu, *mmu, *fpu; u_long clockfreq; cpu = CPU_NAME; mmu = "none"; fpu = "none"; clockfreq = (loops_per_jiffy * HZ) * CPU_INSTR_PER_JIFFY; seq_printf(m, "CPU:\t\t%s\n" "MMU:\t\t%s\n" "FPU:\t\t%s\n" "Clocking:\t%lu.%1luMHz\n" "BogoMips:\t%lu.%02lu\n" "Calibration:\t%lu loops\n", cpu, mmu, fpu, clockfreq / 1000000, (clockfreq / 100000) % 10, (loops_per_jiffy * HZ) / 500000, ((loops_per_jiffy * HZ) / 5000) % 100, (loops_per_jiffy * HZ)); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { ++*pos; return c_start(m, pos); } static void c_stop(struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo, };
gpl-2.0
ARMWorksOrg/FA_210_Android_Kernel
drivers/media/video/samsung/fimc/fimc_v4l2.c
91
8048
/* linux/drivers/media/video/samsung/fimc/fimc_v4l2.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * V4L2 interface support file for Samsung Camera Interface (FIMC) driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/fs.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/videodev2.h> #include <linux/videodev2_samsung.h> #include <media/v4l2-ioctl.h> #include <plat/fimc.h> #include <linux/clk.h> #include "fimc.h" static int fimc_querycap(struct file *filp, void *fh, struct v4l2_capability *cap) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; fimc_info1("%s: called\n", __func__); strcpy(cap->driver, "Samsung FIMC Driver"); strlcpy(cap->card, ctrl->vd->name, sizeof(cap->card)); sprintf(cap->bus_info, "FIMC AHB-bus"); cap->version = 0; cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_STREAMING); return 0; } static int fimc_reqbufs(struct file *filp, void *fh, struct v4l2_requestbuffers *b) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = fimc_reqbufs_capture(fh, b); } else if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ret = fimc_reqbufs_output(fh, b); } else { fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and " "V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n"); ret = -EINVAL; } return ret; } static int fimc_querybuf(struct file *filp, void *fh, struct v4l2_buffer *b) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = fimc_querybuf_capture(fh, b); } else if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ret = fimc_querybuf_output(fh, b); } else { 
fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and " "V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n"); ret = -EINVAL; } return ret; } static int fimc_g_ctrl(struct file *filp, void *fh, struct v4l2_control *c) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (ctrl->cap != NULL) { ret = fimc_g_ctrl_capture(fh, c); } else if (ctrl->out != NULL) { ret = fimc_g_ctrl_output(fh, c); } else { fimc_err("%s: Invalid case\n", __func__); return -EINVAL; } return ret; } static int fimc_s_ctrl(struct file *filp, void *fh, struct v4l2_control *c) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (ctrl->cap != NULL) { ret = fimc_s_ctrl_capture(fh, c); } else if (ctrl->out != NULL) { ret = fimc_s_ctrl_output(filp, fh, c); } else { fimc_err("%s: Invalid case\n", __func__); return -EINVAL; } return ret; } static int fimc_s_ext_ctrls(struct file *filp, void *fh, struct v4l2_ext_controls *c) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (ctrl->cap != NULL) { ret = fimc_s_ext_ctrls_capture(fh, c); } else { fimc_err("%s: Invalid case\n", __func__); return -EINVAL; } return ret; } static int fimc_cropcap(struct file *filp, void *fh, struct v4l2_cropcap *a) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (a->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = fimc_cropcap_capture(fh, a); } else if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ret = fimc_cropcap_output(fh, a); } else { fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and " "V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n"); ret = -EINVAL; } return ret; } static int fimc_g_crop(struct file *filp, void *fh, struct v4l2_crop *a) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (a->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = fimc_g_crop_capture(fh, a); } else if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ret = fimc_g_crop_output(fh, a); } else { fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE 
and " "V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n"); ret = -EINVAL; } return ret; } static int fimc_s_crop(struct file *filp, void *fh, struct v4l2_crop *a) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (a->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = fimc_s_crop_capture(fh, a); } else if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ret = fimc_s_crop_output(fh, a); } else { fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and " "V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n"); ret = -EINVAL; } return ret; } static int fimc_streamon(struct file *filp, void *fh, enum v4l2_buf_type i) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; struct s3c_platform_fimc *pdata; int ret = -1; pdata = to_fimc_plat(ctrl->dev); if (i == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = fimc_streamon_capture(fh); } else if (i == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ret = fimc_streamon_output(fh); } else { fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and " "V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n"); ret = -EINVAL; } return ret; } static int fimc_streamoff(struct file *filp, void *fh, enum v4l2_buf_type i) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; struct s3c_platform_fimc *pdata; int ret = -1; pdata = to_fimc_plat(ctrl->dev); if (i == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = fimc_streamoff_capture(fh); } else if (i == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ret = fimc_streamoff_output(fh); } else { fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and " "V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n"); ret = -EINVAL; } return ret; } static int fimc_qbuf(struct file *filp, void *fh, struct v4l2_buffer *b) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = fimc_qbuf_capture(fh, b); } else if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ret = fimc_qbuf_output(fh, b); } else { fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and " "V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n"); ret = -EINVAL; } 
return ret; } static int fimc_dqbuf(struct file *filp, void *fh, struct v4l2_buffer *b) { struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl; int ret = -1; if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { ret = fimc_dqbuf_capture(fh, b); } else if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { ret = fimc_dqbuf_output(fh, b); } else { fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and " "V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n"); ret = -EINVAL; } return ret; } const struct v4l2_ioctl_ops fimc_v4l2_ops = { .vidioc_querycap = fimc_querycap, .vidioc_reqbufs = fimc_reqbufs, .vidioc_querybuf = fimc_querybuf, .vidioc_g_ctrl = fimc_g_ctrl, .vidioc_s_ctrl = fimc_s_ctrl, .vidioc_s_ext_ctrls = fimc_s_ext_ctrls, .vidioc_cropcap = fimc_cropcap, .vidioc_g_crop = fimc_g_crop, .vidioc_s_crop = fimc_s_crop, .vidioc_streamon = fimc_streamon, .vidioc_streamoff = fimc_streamoff, .vidioc_qbuf = fimc_qbuf, .vidioc_dqbuf = fimc_dqbuf, .vidioc_enum_fmt_vid_cap = fimc_enum_fmt_vid_capture, .vidioc_g_fmt_vid_cap = fimc_g_fmt_vid_capture, .vidioc_s_fmt_vid_cap = fimc_s_fmt_vid_capture, .vidioc_try_fmt_vid_cap = fimc_try_fmt_vid_capture, .vidioc_enum_input = fimc_enum_input, .vidioc_g_input = fimc_g_input, .vidioc_s_input = fimc_s_input, .vidioc_g_parm = fimc_g_parm, .vidioc_s_parm = fimc_s_parm, .vidioc_queryctrl = fimc_queryctrl, .vidioc_querymenu = fimc_querymenu, .vidioc_g_fmt_vid_out = fimc_g_fmt_vid_out, .vidioc_s_fmt_vid_out = fimc_s_fmt_vid_out, .vidioc_try_fmt_vid_out = fimc_try_fmt_vid_out, .vidioc_g_fbuf = fimc_g_fbuf, .vidioc_s_fbuf = fimc_s_fbuf, .vidioc_try_fmt_vid_overlay = fimc_try_fmt_overlay, .vidioc_g_fmt_vid_overlay = fimc_g_fmt_vid_overlay, .vidioc_s_fmt_vid_overlay = fimc_s_fmt_vid_overlay, };
gpl-2.0
sigma-random/gcc
gcc/testsuite/gcc.target/i386/avx512bw-vpunpckhwd-2.c
91
1479
/* { dg-do run } */ /* { dg-options "-O2 -mavx512bw" } */ /* { dg-require-effective-target avx512bw } */ #define AVX512BW #include "avx512f-helper.h" #define SIZE (AVX512F_LEN / 16) #include "avx512f-mask-type.h" void CALC (short *r, short *s1, short *s2) { int i; for (i = 0; i < SIZE/8; i++) { r[8 * i] = s1[8 * i + 4]; r[8 * i + 1] = s2[8 * i + 4]; r[8 * i + 2] = s1[8 * i + 5]; r[8 * i + 3] = s2[8 * i + 5]; r[8 * i + 4] = s1[8 * i + 6]; r[8 * i + 5] = s2[8 * i + 6]; r[8 * i + 6] = s1[8 * i + 7]; r[8 * i + 7] = s2[8 * i + 7]; } } void TEST (void) { int i, sign; UNION_TYPE (AVX512F_LEN, i_w) res1, res2, res3, src1, src2; MASK_TYPE mask = MASK_VALUE; short res_ref[SIZE]; sign = -1; for (i = 0; i < SIZE; i++) { src1.a[i] = 34 * i * sign; src1.a[i] = 179 * i; sign = sign * -1; } for (i = 0; i < SIZE; i++) res2.a[i] = DEFAULT_VALUE; res1.x = INTRINSIC (_unpackhi_epi16) (src1.x, src2.x); res2.x = INTRINSIC (_mask_unpackhi_epi16) (res2.x, mask, src1.x, src2.x); res3.x = INTRINSIC (_maskz_unpackhi_epi16) (mask, src1.x, src2.x); CALC (res_ref, src1.a, src2.a); if (UNION_CHECK (AVX512F_LEN, i_w) (res1, res_ref)) abort (); MASK_MERGE (i_w) (res_ref, mask, SIZE); if (UNION_CHECK (AVX512F_LEN, i_w) (res2, res_ref)) abort (); MASK_ZERO (i_w) (res_ref, mask, SIZE); if (UNION_CHECK (AVX512F_LEN, i_w) (res3, res_ref)) abort (); }
gpl-2.0
Flipkart/linux
arch/arm/mach-omap2/vc.c
347
23135
/* * OMAP Voltage Controller (VC) interface * * Copyright (C) 2011 Texas Instruments, Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/bug.h> #include <linux/io.h> #include <asm/div64.h> #include "iomap.h" #include "soc.h" #include "voltage.h" #include "vc.h" #include "prm-regbits-34xx.h" #include "prm-regbits-44xx.h" #include "prm44xx.h" #include "pm.h" #include "scrm44xx.h" #include "control.h" /** * struct omap_vc_channel_cfg - describe the cfg_channel bitfield * @sa: bit for slave address * @rav: bit for voltage configuration register * @rac: bit for command configuration register * @racen: enable bit for RAC * @cmd: bit for command value set selection * * Channel configuration bits, common for OMAP3+ * OMAP3 register: PRM_VC_CH_CONF * OMAP4 register: PRM_VC_CFG_CHANNEL * OMAP5 register: PRM_VC_SMPS_<voltdm>_CONFIG */ struct omap_vc_channel_cfg { u8 sa; u8 rav; u8 rac; u8 racen; u8 cmd; }; static struct omap_vc_channel_cfg vc_default_channel_cfg = { .sa = BIT(0), .rav = BIT(1), .rac = BIT(2), .racen = BIT(3), .cmd = BIT(4), }; /* * On OMAP3+, all VC channels have the above default bitfield * configuration, except the OMAP4 MPU channel. This appears * to be a freak accident as every other VC channel has the * default configuration, thus creating a mutant channel config. */ static struct omap_vc_channel_cfg vc_mutant_channel_cfg = { .sa = BIT(0), .rav = BIT(2), .rac = BIT(3), .racen = BIT(4), .cmd = BIT(1), }; static struct omap_vc_channel_cfg *vc_cfg_bits; /* Default I2C trace length on pcb, 6.3cm. Used for capacitance calculations. 
*/ static u32 sr_i2c_pcb_length = 63; #define CFG_CHANNEL_MASK 0x1f /** * omap_vc_config_channel - configure VC channel to PMIC mappings * @voltdm: pointer to voltagdomain defining the desired VC channel * * Configures the VC channel to PMIC mappings for the following * PMIC settings * - i2c slave address (SA) * - voltage configuration address (RAV) * - command configuration address (RAC) and enable bit (RACEN) * - command values for ON, ONLP, RET and OFF (CMD) * * This function currently only allows flexible configuration of the * non-default channel. Starting with OMAP4, there are more than 2 * channels, with one defined as the default (on OMAP4, it's MPU.) * Only the non-default channel can be configured. */ static int omap_vc_config_channel(struct voltagedomain *voltdm) { struct omap_vc_channel *vc = voltdm->vc; /* * For default channel, the only configurable bit is RACEN. * All others must stay at zero (see function comment above.) */ if (vc->flags & OMAP_VC_CHANNEL_DEFAULT) vc->cfg_channel &= vc_cfg_bits->racen; voltdm->rmw(CFG_CHANNEL_MASK << vc->cfg_channel_sa_shift, vc->cfg_channel << vc->cfg_channel_sa_shift, vc->cfg_channel_reg); return 0; } /* Voltage scale and accessory APIs */ int omap_vc_pre_scale(struct voltagedomain *voltdm, unsigned long target_volt, u8 *target_vsel, u8 *current_vsel) { struct omap_vc_channel *vc = voltdm->vc; u32 vc_cmdval; /* Check if sufficient pmic info is available for this vdd */ if (!voltdm->pmic) { pr_err("%s: Insufficient pmic info to scale the vdd_%s\n", __func__, voltdm->name); return -EINVAL; } if (!voltdm->pmic->uv_to_vsel) { pr_err("%s: PMIC function to convert voltage in uV to vsel not registered. 
Hence unable to scale voltage for vdd_%s\n", __func__, voltdm->name); return -ENODATA; } if (!voltdm->read || !voltdm->write) { pr_err("%s: No read/write API for accessing vdd_%s regs\n", __func__, voltdm->name); return -EINVAL; } *target_vsel = voltdm->pmic->uv_to_vsel(target_volt); *current_vsel = voltdm->pmic->uv_to_vsel(voltdm->nominal_volt); /* Setting the ON voltage to the new target voltage */ vc_cmdval = voltdm->read(vc->cmdval_reg); vc_cmdval &= ~vc->common->cmd_on_mask; vc_cmdval |= (*target_vsel << vc->common->cmd_on_shift); voltdm->write(vc_cmdval, vc->cmdval_reg); voltdm->vc_param->on = target_volt; omap_vp_update_errorgain(voltdm, target_volt); return 0; } void omap_vc_post_scale(struct voltagedomain *voltdm, unsigned long target_volt, u8 target_vsel, u8 current_vsel) { u32 smps_steps = 0, smps_delay = 0; smps_steps = abs(target_vsel - current_vsel); /* SMPS slew rate / step size. 2us added as buffer. */ smps_delay = ((smps_steps * voltdm->pmic->step_size) / voltdm->pmic->slew_rate) + 2; udelay(smps_delay); } /* vc_bypass_scale - VC bypass method of voltage scaling */ int omap_vc_bypass_scale(struct voltagedomain *voltdm, unsigned long target_volt) { struct omap_vc_channel *vc = voltdm->vc; u32 loop_cnt = 0, retries_cnt = 0; u32 vc_valid, vc_bypass_val_reg, vc_bypass_value; u8 target_vsel, current_vsel; int ret; ret = omap_vc_pre_scale(voltdm, target_volt, &target_vsel, &current_vsel); if (ret) return ret; vc_valid = vc->common->valid; vc_bypass_val_reg = vc->common->bypass_val_reg; vc_bypass_value = (target_vsel << vc->common->data_shift) | (vc->volt_reg_addr << vc->common->regaddr_shift) | (vc->i2c_slave_addr << vc->common->slaveaddr_shift); voltdm->write(vc_bypass_value, vc_bypass_val_reg); voltdm->write(vc_bypass_value | vc_valid, vc_bypass_val_reg); vc_bypass_value = voltdm->read(vc_bypass_val_reg); /* * Loop till the bypass command is acknowledged from the SMPS. * NOTE: This is legacy code. The loop count and retry count needs * to be revisited. 
*/ while (!(vc_bypass_value & vc_valid)) { loop_cnt++; if (retries_cnt > 10) { pr_warn("%s: Retry count exceeded\n", __func__); return -ETIMEDOUT; } if (loop_cnt > 50) { retries_cnt++; loop_cnt = 0; udelay(10); } vc_bypass_value = voltdm->read(vc_bypass_val_reg); } omap_vc_post_scale(voltdm, target_volt, target_vsel, current_vsel); return 0; } /* Convert microsecond value to number of 32kHz clock cycles */ static inline u32 omap_usec_to_32k(u32 usec) { return DIV_ROUND_UP_ULL(32768ULL * (u64)usec, 1000000ULL); } struct omap3_vc_timings { u32 voltsetup1; u32 voltsetup2; }; struct omap3_vc { struct voltagedomain *vd; u32 voltctrl; u32 voltsetup1; u32 voltsetup2; struct omap3_vc_timings timings[2]; }; static struct omap3_vc vc; void omap3_vc_set_pmic_signaling(int core_next_state) { struct voltagedomain *vd = vc.vd; struct omap3_vc_timings *c = vc.timings; u32 voltctrl, voltsetup1, voltsetup2; voltctrl = vc.voltctrl; voltsetup1 = vc.voltsetup1; voltsetup2 = vc.voltsetup2; switch (core_next_state) { case PWRDM_POWER_OFF: voltctrl &= ~(OMAP3430_PRM_VOLTCTRL_AUTO_RET | OMAP3430_PRM_VOLTCTRL_AUTO_SLEEP); voltctrl |= OMAP3430_PRM_VOLTCTRL_AUTO_OFF; if (voltctrl & OMAP3430_PRM_VOLTCTRL_SEL_OFF) voltsetup2 = c->voltsetup2; else voltsetup1 = c->voltsetup1; break; case PWRDM_POWER_RET: default: c++; voltctrl &= ~(OMAP3430_PRM_VOLTCTRL_AUTO_OFF | OMAP3430_PRM_VOLTCTRL_AUTO_SLEEP); voltctrl |= OMAP3430_PRM_VOLTCTRL_AUTO_RET; voltsetup1 = c->voltsetup1; break; } if (voltctrl != vc.voltctrl) { vd->write(voltctrl, OMAP3_PRM_VOLTCTRL_OFFSET); vc.voltctrl = voltctrl; } if (voltsetup1 != vc.voltsetup1) { vd->write(c->voltsetup1, OMAP3_PRM_VOLTSETUP1_OFFSET); vc.voltsetup1 = voltsetup1; } if (voltsetup2 != vc.voltsetup2) { vd->write(c->voltsetup2, OMAP3_PRM_VOLTSETUP2_OFFSET); vc.voltsetup2 = voltsetup2; } } #define PRM_POLCTRL_TWL_MASK (OMAP3430_PRM_POLCTRL_CLKREQ_POL | \ OMAP3430_PRM_POLCTRL_CLKREQ_POL) #define PRM_POLCTRL_TWL_VAL OMAP3430_PRM_POLCTRL_CLKREQ_POL /* * Configure signal 
polarity for sys_clkreq and sys_off_mode pins * as the default values are wrong and can cause the system to hang * if any twl4030 scripts are loaded. */ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm) { u32 val; if (vc.vd) return; vc.vd = voltdm; val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET); if (!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) || (val & OMAP3430_PRM_POLCTRL_CLKREQ_POL)) { val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL; val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL; pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n", val); voltdm->write(val, OMAP3_PRM_POLCTRL_OFFSET); } /* * By default let's use I2C4 signaling for retention idle * and sys_off_mode pin signaling for off idle. This way we * have sys_clk_req pin go down for retention and both * sys_clk_req and sys_off_mode pins will go down for off * idle. And we can also scale voltages to zero for off-idle. * Note that no actual voltage scaling during off-idle will * happen unless the board specific twl4030 PMIC scripts are * loaded. */ val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET); if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) { val |= OMAP3430_PRM_VOLTCTRL_SEL_OFF; pr_debug("PM: setting voltctrl sys_off_mode signaling to 0x%x\n", val); voltdm->write(val, OMAP3_PRM_VOLTCTRL_OFFSET); } vc.voltctrl = val; omap3_vc_set_pmic_signaling(PWRDM_POWER_ON); } static void omap3_init_voltsetup1(struct voltagedomain *voltdm, struct omap3_vc_timings *c, u32 idle) { unsigned long val; val = (voltdm->vc_param->on - idle) / voltdm->pmic->slew_rate; val *= voltdm->sys_clk.rate / 8 / 1000000 + 1; val <<= __ffs(voltdm->vfsm->voltsetup_mask); c->voltsetup1 &= ~voltdm->vfsm->voltsetup_mask; c->voltsetup1 |= val; } /** * omap3_set_i2c_timings - sets i2c sleep timings for a channel * @voltdm: channel to configure * @off_mode: select whether retention or off mode values used * * Calculates and sets up voltage controller to use I2C based * voltage scaling for sleep modes. 
This can be used for either off mode * or retention. Off mode has additionally an option to use sys_off_mode * pad, which uses a global signal to program the whole power IC to * off-mode. * * Note that pmic is not controlling the voltage scaling during * retention signaled over I2C4, so we can keep voltsetup2 as 0. * And the oscillator is not shut off over I2C4, so no need to * set clksetup. */ static void omap3_set_i2c_timings(struct voltagedomain *voltdm) { struct omap3_vc_timings *c = vc.timings; /* Configure PRWDM_POWER_OFF over I2C4 */ omap3_init_voltsetup1(voltdm, c, voltdm->vc_param->off); c++; /* Configure PRWDM_POWER_RET over I2C4 */ omap3_init_voltsetup1(voltdm, c, voltdm->vc_param->ret); } /** * omap3_set_off_timings - sets off-mode timings for a channel * @voltdm: channel to configure * * Calculates and sets up off-mode timings for a channel. Off-mode * can use either I2C based voltage scaling, or alternatively * sys_off_mode pad can be used to send a global command to power IC.n, * sys_off_mode has the additional benefit that voltages can be * scaled to zero volt level with TWL4030 / TWL5030, I2C can only * scale to 600mV. * * Note that omap is not controlling the voltage scaling during * off idle signaled by sys_off_mode, so we can keep voltsetup1 * as 0. */ static void omap3_set_off_timings(struct voltagedomain *voltdm) { struct omap3_vc_timings *c = vc.timings; u32 tstart, tshut, clksetup, voltoffset; if (c->voltsetup2) return; omap_pm_get_oscillator(&tstart, &tshut); if (tstart == ULONG_MAX) { pr_debug("PM: oscillator start-up time not initialized, using 10ms\n"); clksetup = omap_usec_to_32k(10000); } else { clksetup = omap_usec_to_32k(tstart); } /* * For twl4030 errata 27, we need to allow minimum ~488.32 us wait to * switch from HFCLKIN to internal oscillator. That means timings * have voltoffset fixed to 0xa in rounded up 32 KiHz cycles. 
And * that means we can calculate the value based on the oscillator * start-up time since voltoffset2 = clksetup - voltoffset. */ voltoffset = omap_usec_to_32k(488); c->voltsetup2 = clksetup - voltoffset; voltdm->write(clksetup, OMAP3_PRM_CLKSETUP_OFFSET); voltdm->write(voltoffset, OMAP3_PRM_VOLTOFFSET_OFFSET); } static void __init omap3_vc_init_channel(struct voltagedomain *voltdm) { omap3_vc_init_pmic_signaling(voltdm); omap3_set_off_timings(voltdm); omap3_set_i2c_timings(voltdm); } /** * omap4_calc_volt_ramp - calculates voltage ramping delays on omap4 * @voltdm: channel to calculate values for * @voltage_diff: voltage difference in microvolts * * Calculates voltage ramp prescaler + counter values for a voltage * difference on omap4. Returns a field value suitable for writing to * VOLTSETUP register for a channel in following format: * bits[8:9] prescaler ... bits[0:5] counter. See OMAP4 TRM for reference. */ static u32 omap4_calc_volt_ramp(struct voltagedomain *voltdm, u32 voltage_diff) { u32 prescaler; u32 cycles; u32 time; time = voltage_diff / voltdm->pmic->slew_rate; cycles = voltdm->sys_clk.rate / 1000 * time / 1000; cycles /= 64; prescaler = 0; /* shift to next prescaler until no overflow */ /* scale for div 256 = 64 * 4 */ if (cycles > 63) { cycles /= 4; prescaler++; } /* scale for div 512 = 256 * 2 */ if (cycles > 63) { cycles /= 2; prescaler++; } /* scale for div 2048 = 512 * 4 */ if (cycles > 63) { cycles /= 4; prescaler++; } /* check for overflow => invalid ramp time */ if (cycles > 63) { pr_warn("%s: invalid setuptime for vdd_%s\n", __func__, voltdm->name); return 0; } cycles++; return (prescaler << OMAP4430_RAMP_UP_PRESCAL_SHIFT) | (cycles << OMAP4430_RAMP_UP_COUNT_SHIFT); } /** * omap4_usec_to_val_scrm - convert microsecond value to SCRM module bitfield * @usec: microseconds * @shift: number of bits to shift left * @mask: bitfield mask * * Converts microsecond value to OMAP4 SCRM bitfield. 
Bitfield is * shifted to requested position, and checked agains the mask value. * If larger, forced to the max value of the field (i.e. the mask itself.) * Returns the SCRM bitfield value. */ static u32 omap4_usec_to_val_scrm(u32 usec, int shift, u32 mask) { u32 val; val = omap_usec_to_32k(usec) << shift; /* Check for overflow, if yes, force to max value */ if (val > mask) val = mask; return val; } /** * omap4_set_timings - set voltage ramp timings for a channel * @voltdm: channel to configure * @off_mode: whether off-mode values are used * * Calculates and sets the voltage ramp up / down values for a channel. */ static void omap4_set_timings(struct voltagedomain *voltdm, bool off_mode) { u32 val; u32 ramp; int offset; u32 tstart, tshut; if (off_mode) { ramp = omap4_calc_volt_ramp(voltdm, voltdm->vc_param->on - voltdm->vc_param->off); offset = voltdm->vfsm->voltsetup_off_reg; } else { ramp = omap4_calc_volt_ramp(voltdm, voltdm->vc_param->on - voltdm->vc_param->ret); offset = voltdm->vfsm->voltsetup_reg; } if (!ramp) return; val = voltdm->read(offset); val |= ramp << OMAP4430_RAMP_DOWN_COUNT_SHIFT; val |= ramp << OMAP4430_RAMP_UP_COUNT_SHIFT; voltdm->write(val, offset); omap_pm_get_oscillator(&tstart, &tshut); val = omap4_usec_to_val_scrm(tstart, OMAP4_SETUPTIME_SHIFT, OMAP4_SETUPTIME_MASK); val |= omap4_usec_to_val_scrm(tshut, OMAP4_DOWNTIME_SHIFT, OMAP4_DOWNTIME_MASK); writel_relaxed(val, OMAP4_SCRM_CLKSETUPTIME); } /* OMAP4 specific voltage init functions */ static void __init omap4_vc_init_channel(struct voltagedomain *voltdm) { omap4_set_timings(voltdm, true); omap4_set_timings(voltdm, false); } struct i2c_init_data { u8 loadbits; u8 load; u8 hsscll_38_4; u8 hsscll_26; u8 hsscll_19_2; u8 hsscll_16_8; u8 hsscll_12; }; static const __initdata struct i2c_init_data omap4_i2c_timing_data[] = { { .load = 50, .loadbits = 0x3, .hsscll_38_4 = 13, .hsscll_26 = 11, .hsscll_19_2 = 9, .hsscll_16_8 = 9, .hsscll_12 = 8, }, { .load = 25, .loadbits = 0x2, .hsscll_38_4 = 13, 
.hsscll_26 = 11, .hsscll_19_2 = 9, .hsscll_16_8 = 9, .hsscll_12 = 8, }, { .load = 12, .loadbits = 0x1, .hsscll_38_4 = 11, .hsscll_26 = 10, .hsscll_19_2 = 9, .hsscll_16_8 = 9, .hsscll_12 = 8, }, { .load = 0, .loadbits = 0x0, .hsscll_38_4 = 12, .hsscll_26 = 10, .hsscll_19_2 = 9, .hsscll_16_8 = 8, .hsscll_12 = 8, }, }; /** * omap4_vc_i2c_timing_init - sets up board I2C timing parameters * @voltdm: voltagedomain pointer to get data from * * Use PMIC + board supplied settings for calculating the total I2C * channel capacitance and set the timing parameters based on this. * Pre-calculated values are provided in data tables, as it is not * too straightforward to calculate these runtime. */ static void __init omap4_vc_i2c_timing_init(struct voltagedomain *voltdm) { u32 capacitance; u32 val; u16 hsscll; const struct i2c_init_data *i2c_data; if (!voltdm->pmic->i2c_high_speed) { pr_warn("%s: only high speed supported!\n", __func__); return; } /* PCB trace capacitance, 0.125pF / mm => mm / 8 */ capacitance = DIV_ROUND_UP(sr_i2c_pcb_length, 8); /* OMAP pad capacitance */ capacitance += 4; /* PMIC pad capacitance */ capacitance += voltdm->pmic->i2c_pad_load; /* Search for capacitance match in the table */ i2c_data = omap4_i2c_timing_data; while (i2c_data->load > capacitance) i2c_data++; /* Select proper values based on sysclk frequency */ switch (voltdm->sys_clk.rate) { case 38400000: hsscll = i2c_data->hsscll_38_4; break; case 26000000: hsscll = i2c_data->hsscll_26; break; case 19200000: hsscll = i2c_data->hsscll_19_2; break; case 16800000: hsscll = i2c_data->hsscll_16_8; break; case 12000000: hsscll = i2c_data->hsscll_12; break; default: pr_warn("%s: unsupported sysclk rate: %d!\n", __func__, voltdm->sys_clk.rate); return; } /* Loadbits define pull setup for the I2C channels */ val = i2c_data->loadbits << 25 | i2c_data->loadbits << 29; /* Write to SYSCTRL_PADCONF_WKUP_CTRL_I2C_2 to setup I2C pull */ writel_relaxed(val, OMAP2_L4_IO_ADDRESS(OMAP4_CTRL_MODULE_PAD_WKUP + 
OMAP4_CTRL_MODULE_PAD_WKUP_CONTROL_I2C_2)); /* HSSCLH can always be zero */ val = hsscll << OMAP4430_HSSCLL_SHIFT; val |= (0x28 << OMAP4430_SCLL_SHIFT | 0x2c << OMAP4430_SCLH_SHIFT); /* Write setup times to I2C config register */ voltdm->write(val, OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET); } /** * omap_vc_i2c_init - initialize I2C interface to PMIC * @voltdm: voltage domain containing VC data * * Use PMIC supplied settings for I2C high-speed mode and * master code (if set) and program the VC I2C configuration * register. * * The VC I2C configuration is common to all VC channels, * so this function only configures I2C for the first VC * channel registers. All other VC channels will use the * same configuration. */ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) { struct omap_vc_channel *vc = voltdm->vc; static bool initialized; static bool i2c_high_speed; u8 mcode; if (initialized) { if (voltdm->pmic->i2c_high_speed != i2c_high_speed) pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n", __func__, voltdm->name, i2c_high_speed); return; } i2c_high_speed = voltdm->pmic->i2c_high_speed; if (i2c_high_speed) voltdm->rmw(vc->common->i2c_cfg_hsen_mask, vc->common->i2c_cfg_hsen_mask, vc->common->i2c_cfg_reg); mcode = voltdm->pmic->i2c_mcode; if (mcode) voltdm->rmw(vc->common->i2c_mcode_mask, mcode << __ffs(vc->common->i2c_mcode_mask), vc->common->i2c_cfg_reg); if (cpu_is_omap44xx()) omap4_vc_i2c_timing_init(voltdm); initialized = true; } /** * omap_vc_calc_vsel - calculate vsel value for a channel * @voltdm: channel to calculate value for * @uvolt: microvolt value to convert to vsel * * Converts a microvolt value to vsel value for the used PMIC. * This checks whether the microvolt value is out of bounds, and * adjusts the value accordingly. If unsupported value detected, * warning is thrown. 
*/ static u8 omap_vc_calc_vsel(struct voltagedomain *voltdm, u32 uvolt) { if (voltdm->pmic->vddmin > uvolt) uvolt = voltdm->pmic->vddmin; if (voltdm->pmic->vddmax < uvolt) { WARN(1, "%s: voltage not supported by pmic: %u vs max %u\n", __func__, uvolt, voltdm->pmic->vddmax); /* Lets try maximum value anyway */ uvolt = voltdm->pmic->vddmax; } return voltdm->pmic->uv_to_vsel(uvolt); } #ifdef CONFIG_PM /** * omap_pm_setup_sr_i2c_pcb_length - set length of SR I2C traces on PCB * @mm: length of the PCB trace in millimetres * * Sets the PCB trace length for the I2C channel. By default uses 63mm. * This is needed for properly calculating the capacitance value for * the PCB trace, and for setting the SR I2C channel timing parameters. */ void __init omap_pm_setup_sr_i2c_pcb_length(u32 mm) { sr_i2c_pcb_length = mm; } #endif void __init omap_vc_init_channel(struct voltagedomain *voltdm) { struct omap_vc_channel *vc = voltdm->vc; u8 on_vsel, onlp_vsel, ret_vsel, off_vsel; u32 val; if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) { pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name); return; } if (!voltdm->read || !voltdm->write) { pr_err("%s: No read/write API for accessing vdd_%s regs\n", __func__, voltdm->name); return; } vc->cfg_channel = 0; if (vc->flags & OMAP_VC_CHANNEL_CFG_MUTANT) vc_cfg_bits = &vc_mutant_channel_cfg; else vc_cfg_bits = &vc_default_channel_cfg; /* get PMIC/board specific settings */ vc->i2c_slave_addr = voltdm->pmic->i2c_slave_addr; vc->volt_reg_addr = voltdm->pmic->volt_reg_addr; vc->cmd_reg_addr = voltdm->pmic->cmd_reg_addr; /* Configure the i2c slave address for this VC */ voltdm->rmw(vc->smps_sa_mask, vc->i2c_slave_addr << __ffs(vc->smps_sa_mask), vc->smps_sa_reg); vc->cfg_channel |= vc_cfg_bits->sa; /* * Configure the PMIC register addresses. 
 */
	/* Voltage register address within the PMIC */
	voltdm->rmw(vc->smps_volra_mask,
		    vc->volt_reg_addr << __ffs(vc->smps_volra_mask),
		    vc->smps_volra_reg);
	vc->cfg_channel |= vc_cfg_bits->rav;

	/* Command register address is optional; only program when set */
	if (vc->cmd_reg_addr) {
		voltdm->rmw(vc->smps_cmdra_mask,
			    vc->cmd_reg_addr << __ffs(vc->smps_cmdra_mask),
			    vc->smps_cmdra_reg);
		vc->cfg_channel |= vc_cfg_bits->rac;
	}

	/* Shared voltage/command register: enable combined addressing */
	if (vc->cmd_reg_addr == vc->volt_reg_addr)
		vc->cfg_channel |= vc_cfg_bits->racen;

	/* Set up the on, inactive, retention and off voltage */
	on_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->on);
	onlp_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->onlp);
	ret_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->ret);
	off_vsel = omap_vc_calc_vsel(voltdm, voltdm->vc_param->off);

	/* Pack all four vsel values into the single command value register */
	val = ((on_vsel << vc->common->cmd_on_shift) |
	       (onlp_vsel << vc->common->cmd_onlp_shift) |
	       (ret_vsel << vc->common->cmd_ret_shift) |
	       (off_vsel << vc->common->cmd_off_shift));
	voltdm->write(val, vc->cmdval_reg);
	vc->cfg_channel |= vc_cfg_bits->cmd;

	/* Channel configuration */
	omap_vc_config_channel(voltdm);

	/* I2C setup is shared across channels; done once internally */
	omap_vc_i2c_init(voltdm);

	/* SoC-generation specific VC setup */
	if (cpu_is_omap34xx())
		omap3_vc_init_channel(voltdm);
	else if (cpu_is_omap44xx())
		omap4_vc_init_channel(voltdm);
}
gpl-2.0
bond-os/linux
sound/soc/codecs/wm8350.c
347
50549
/* * wm8350.c -- WM8350 ALSA SoC audio driver * * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. * * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/mfd/wm8350/audio.h> #include <linux/mfd/wm8350/core.h> #include <linux/regulator/consumer.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8350.h" #define WM8350_OUTn_0dB 0x39 #define WM8350_RAMP_NONE 0 #define WM8350_RAMP_UP 1 #define WM8350_RAMP_DOWN 2 /* We only include the analogue supplies here; the digital supplies * need to be available well before this driver can be probed. 
 */
static const char *supply_names[] = {
	"AVDD",
	"HPVDD",
};

/* Shadowed state for one stereo output PGA pair (OUT1 or OUT2) */
struct wm8350_output {
	u16 active;	/* non-zero while the output path is powered */
	u16 left_vol;	/* target left PGA volume (shadowed) */
	u16 right_vol;	/* target right PGA volume (shadowed) */
	u16 ramp;	/* WM8350_RAMP_NONE / _UP / _DOWN */
	u16 mute;
};

struct wm8350_jack_data {
	struct snd_soc_jack *jack;
	int report;
};

/* Per-codec private driver state, reached via codec->private_data */
struct wm8350_data {
	struct snd_soc_codec codec;
	struct wm8350_output out1;
	struct wm8350_output out2;
	struct wm8350_jack_data hpl;
	struct wm8350_jack_data hpr;
	struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
	int fll_freq_out;	/* last configured FLL output frequency */
	int fll_freq_in;	/* last configured FLL input frequency */
};

/* Read a register value from the MFD register cache (no bus I/O) */
static unsigned int wm8350_codec_cache_read(struct snd_soc_codec *codec,
					    unsigned int reg)
{
	struct wm8350 *wm8350 = codec->control_data;

	return wm8350->reg_cache[reg];
}

/* Read a register through the wm8350 MFD core */
static unsigned int wm8350_codec_read(struct snd_soc_codec *codec,
				      unsigned int reg)
{
	struct wm8350 *wm8350 = codec->control_data;

	return wm8350_reg_read(wm8350, reg);
}

/* Write a register through the wm8350 MFD core */
static int wm8350_codec_write(struct snd_soc_codec *codec, unsigned int reg,
			      unsigned int value)
{
	struct wm8350 *wm8350 = codec->control_data;

	return wm8350_reg_write(wm8350, reg, value);
}

/*
 * Ramp OUT1 PGA volume to minimise pops at stream startup and shutdown.
*/ static inline int wm8350_out1_ramp_step(struct snd_soc_codec *codec) { struct wm8350_data *wm8350_data = codec->private_data; struct wm8350_output *out1 = &wm8350_data->out1; struct wm8350 *wm8350 = codec->control_data; int left_complete = 0, right_complete = 0; u16 reg, val; /* left channel */ reg = wm8350_reg_read(wm8350, WM8350_LOUT1_VOLUME); val = (reg & WM8350_OUT1L_VOL_MASK) >> WM8350_OUT1L_VOL_SHIFT; if (out1->ramp == WM8350_RAMP_UP) { /* ramp step up */ if (val < out1->left_vol) { val++; reg &= ~WM8350_OUT1L_VOL_MASK; wm8350_reg_write(wm8350, WM8350_LOUT1_VOLUME, reg | (val << WM8350_OUT1L_VOL_SHIFT)); } else left_complete = 1; } else if (out1->ramp == WM8350_RAMP_DOWN) { /* ramp step down */ if (val > 0) { val--; reg &= ~WM8350_OUT1L_VOL_MASK; wm8350_reg_write(wm8350, WM8350_LOUT1_VOLUME, reg | (val << WM8350_OUT1L_VOL_SHIFT)); } else left_complete = 1; } else return 1; /* right channel */ reg = wm8350_reg_read(wm8350, WM8350_ROUT1_VOLUME); val = (reg & WM8350_OUT1R_VOL_MASK) >> WM8350_OUT1R_VOL_SHIFT; if (out1->ramp == WM8350_RAMP_UP) { /* ramp step up */ if (val < out1->right_vol) { val++; reg &= ~WM8350_OUT1R_VOL_MASK; wm8350_reg_write(wm8350, WM8350_ROUT1_VOLUME, reg | (val << WM8350_OUT1R_VOL_SHIFT)); } else right_complete = 1; } else if (out1->ramp == WM8350_RAMP_DOWN) { /* ramp step down */ if (val > 0) { val--; reg &= ~WM8350_OUT1R_VOL_MASK; wm8350_reg_write(wm8350, WM8350_ROUT1_VOLUME, reg | (val << WM8350_OUT1R_VOL_SHIFT)); } else right_complete = 1; } /* only hit the update bit if either volume has changed this step */ if (!left_complete || !right_complete) wm8350_set_bits(wm8350, WM8350_LOUT1_VOLUME, WM8350_OUT1_VU); return left_complete & right_complete; } /* * Ramp OUT2 PGA volume to minimise pops at stream startup and shutdown. 
*/ static inline int wm8350_out2_ramp_step(struct snd_soc_codec *codec) { struct wm8350_data *wm8350_data = codec->private_data; struct wm8350_output *out2 = &wm8350_data->out2; struct wm8350 *wm8350 = codec->control_data; int left_complete = 0, right_complete = 0; u16 reg, val; /* left channel */ reg = wm8350_reg_read(wm8350, WM8350_LOUT2_VOLUME); val = (reg & WM8350_OUT2L_VOL_MASK) >> WM8350_OUT1L_VOL_SHIFT; if (out2->ramp == WM8350_RAMP_UP) { /* ramp step up */ if (val < out2->left_vol) { val++; reg &= ~WM8350_OUT2L_VOL_MASK; wm8350_reg_write(wm8350, WM8350_LOUT2_VOLUME, reg | (val << WM8350_OUT1L_VOL_SHIFT)); } else left_complete = 1; } else if (out2->ramp == WM8350_RAMP_DOWN) { /* ramp step down */ if (val > 0) { val--; reg &= ~WM8350_OUT2L_VOL_MASK; wm8350_reg_write(wm8350, WM8350_LOUT2_VOLUME, reg | (val << WM8350_OUT1L_VOL_SHIFT)); } else left_complete = 1; } else return 1; /* right channel */ reg = wm8350_reg_read(wm8350, WM8350_ROUT2_VOLUME); val = (reg & WM8350_OUT2R_VOL_MASK) >> WM8350_OUT1R_VOL_SHIFT; if (out2->ramp == WM8350_RAMP_UP) { /* ramp step up */ if (val < out2->right_vol) { val++; reg &= ~WM8350_OUT2R_VOL_MASK; wm8350_reg_write(wm8350, WM8350_ROUT2_VOLUME, reg | (val << WM8350_OUT1R_VOL_SHIFT)); } else right_complete = 1; } else if (out2->ramp == WM8350_RAMP_DOWN) { /* ramp step down */ if (val > 0) { val--; reg &= ~WM8350_OUT2R_VOL_MASK; wm8350_reg_write(wm8350, WM8350_ROUT2_VOLUME, reg | (val << WM8350_OUT1R_VOL_SHIFT)); } else right_complete = 1; } /* only hit the update bit if either volume has changed this step */ if (!left_complete || !right_complete) wm8350_set_bits(wm8350, WM8350_LOUT2_VOLUME, WM8350_OUT2_VU); return left_complete & right_complete; } /* * This work ramps both output PGAs at stream start/stop time to * minimise pop associated with DAPM power switching. * It's best to enable Zero Cross when ramping occurs to minimise any * zipper noises. 
 */
static void wm8350_pga_work(struct work_struct *work)
{
	struct snd_soc_codec *codec =
	    container_of(work, struct snd_soc_codec, delayed_work.work);
	struct wm8350_data *wm8350_data = codec->private_data;
	struct wm8350_output *out1 = &wm8350_data->out1,
	    *out2 = &wm8350_data->out2;
	int i, out1_complete, out2_complete;

	/* do we need to ramp at all ? */
	if (out1->ramp == WM8350_RAMP_NONE && out2->ramp == WM8350_RAMP_NONE)
		return;

	/* PGA volumes have 6 bits of resolution to ramp */
	for (i = 0; i <= 63; i++) {
		out1_complete = 1, out2_complete = 1;
		if (out1->ramp != WM8350_RAMP_NONE)
			out1_complete = wm8350_out1_ramp_step(codec);
		if (out2->ramp != WM8350_RAMP_NONE)
			out2_complete = wm8350_out2_ramp_step(codec);

		/* ramp finished ? */
		if (out1_complete && out2_complete)
			break;

		/* we need to delay longer on the up ramp */
		if (out1->ramp == WM8350_RAMP_UP ||
		    out2->ramp == WM8350_RAMP_UP) {
			/* delay is longer over 0dB as increases are larger */
			if (i >= WM8350_OUTn_0dB)
				schedule_timeout_interruptible(msecs_to_jiffies
							       (2));
			else
				schedule_timeout_interruptible(msecs_to_jiffies
							       (1));
		} else
			udelay(50);	/* doesn't matter if we delay longer */
	}

	/* ramp done; mark both outputs idle so the next trigger restarts */
	out1->ramp = WM8350_RAMP_NONE;
	out2->ramp = WM8350_RAMP_NONE;
}

/*
 * WM8350 Controls
 */

/* DAPM event handler for the four output PGAs: maps the widget shift to
 * the OUT1/OUT2 shadow state and kicks off the appropriate volume ramp. */
static int pga_event(struct snd_soc_dapm_widget *w,
		     struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct wm8350_data *wm8350_data = codec->private_data;
	struct wm8350_output *out;

	/* shifts 0/1 are the OUT1 pair, 2/3 the OUT2 pair */
	switch (w->shift) {
	case 0:
	case 1:
		out = &wm8350_data->out1;
		break;
	case 2:
	case 3:
		out = &wm8350_data->out2;
		break;
	default:
		BUG();
		return -1;
	}

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* power-up: ramp the PGA up to the shadowed volume */
		out->ramp = WM8350_RAMP_UP;
		out->active = 1;

		if (!delayed_work_pending(&codec->delayed_work))
			schedule_delayed_work(&codec->delayed_work,
					      msecs_to_jiffies(1));
		break;
	case SND_SOC_DAPM_PRE_PMD:
		/* about to power down: ramp the PGA down first */
		out->ramp = WM8350_RAMP_DOWN;
		out->active = 0;

		if (!delayed_work_pending(&codec->delayed_work))
			schedule_delayed_work(&codec->delayed_work,
					      msecs_to_jiffies(1));
break; } return 0; } static int wm8350_put_volsw_2r_vu(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct wm8350_data *wm8350_priv = codec->private_data; struct wm8350_output *out = NULL; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int ret; unsigned int reg = mc->reg; u16 val; /* For OUT1 and OUT2 we shadow the values and only actually write * them out when active in order to ensure the amplifier comes on * as quietly as possible. */ switch (reg) { case WM8350_LOUT1_VOLUME: out = &wm8350_priv->out1; break; case WM8350_LOUT2_VOLUME: out = &wm8350_priv->out2; break; default: break; } if (out) { out->left_vol = ucontrol->value.integer.value[0]; out->right_vol = ucontrol->value.integer.value[1]; if (!out->active) return 1; } ret = snd_soc_put_volsw_2r(kcontrol, ucontrol); if (ret < 0) return ret; /* now hit the volume update bits (always bit 8) */ val = wm8350_codec_read(codec, reg); wm8350_codec_write(codec, reg, val | WM8350_OUT1_VU); return 1; } static int wm8350_get_volsw_2r(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct wm8350_data *wm8350_priv = codec->private_data; struct wm8350_output *out1 = &wm8350_priv->out1; struct wm8350_output *out2 = &wm8350_priv->out2; struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int reg = mc->reg; /* If these are cached registers use the cache */ switch (reg) { case WM8350_LOUT1_VOLUME: ucontrol->value.integer.value[0] = out1->left_vol; ucontrol->value.integer.value[1] = out1->right_vol; return 0; case WM8350_LOUT2_VOLUME: ucontrol->value.integer.value[0] = out2->left_vol; ucontrol->value.integer.value[1] = out2->right_vol; return 0; default: break; } return snd_soc_get_volsw_2r(kcontrol, ucontrol); } /* double control with volume update */ #define SOC_WM8350_DOUBLE_R_TLV(xname, 
reg_left, reg_right, xshift, xmax, \ xinvert, tlv_array) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE | \ SNDRV_CTL_ELEM_ACCESS_VOLATILE, \ .tlv.p = (tlv_array), \ .info = snd_soc_info_volsw_2r, \ .get = wm8350_get_volsw_2r, .put = wm8350_put_volsw_2r_vu, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ {.reg = reg_left, .rreg = reg_right, .shift = xshift, \ .rshift = xshift, .max = xmax, .invert = xinvert}, } static const char *wm8350_deemp[] = { "None", "32kHz", "44.1kHz", "48kHz" }; static const char *wm8350_pol[] = { "Normal", "Inv R", "Inv L", "Inv L & R" }; static const char *wm8350_dacmutem[] = { "Normal", "Soft" }; static const char *wm8350_dacmutes[] = { "Fast", "Slow" }; static const char *wm8350_adcfilter[] = { "None", "High Pass" }; static const char *wm8350_adchp[] = { "44.1kHz", "8kHz", "16kHz", "32kHz" }; static const char *wm8350_lr[] = { "Left", "Right" }; static const struct soc_enum wm8350_enum[] = { SOC_ENUM_SINGLE(WM8350_DAC_CONTROL, 4, 4, wm8350_deemp), SOC_ENUM_SINGLE(WM8350_DAC_CONTROL, 0, 4, wm8350_pol), SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 14, 2, wm8350_dacmutem), SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 13, 2, wm8350_dacmutes), SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 15, 2, wm8350_adcfilter), SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 8, 4, wm8350_adchp), SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 0, 4, wm8350_pol), SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr), }; static DECLARE_TLV_DB_LINEAR(pre_amp_tlv, -1200, 3525); static DECLARE_TLV_DB_LINEAR(out_pga_tlv, -5700, 600); static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1); static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1); static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1); static const unsigned int capture_sd_tlv[] = { TLV_DB_RANGE_HEAD(2), 0, 12, TLV_DB_SCALE_ITEM(-3600, 300, 1), 13, 15, TLV_DB_SCALE_ITEM(0, 0, 0), }; static const struct snd_kcontrol_new 
wm8350_snd_controls[] = { SOC_ENUM("Playback Deemphasis", wm8350_enum[0]), SOC_ENUM("Playback DAC Inversion", wm8350_enum[1]), SOC_WM8350_DOUBLE_R_TLV("Playback PCM Volume", WM8350_DAC_DIGITAL_VOLUME_L, WM8350_DAC_DIGITAL_VOLUME_R, 0, 255, 0, dac_pcm_tlv), SOC_ENUM("Playback PCM Mute Function", wm8350_enum[2]), SOC_ENUM("Playback PCM Mute Speed", wm8350_enum[3]), SOC_ENUM("Capture PCM Filter", wm8350_enum[4]), SOC_ENUM("Capture PCM HP Filter", wm8350_enum[5]), SOC_ENUM("Capture ADC Inversion", wm8350_enum[6]), SOC_WM8350_DOUBLE_R_TLV("Capture PCM Volume", WM8350_ADC_DIGITAL_VOLUME_L, WM8350_ADC_DIGITAL_VOLUME_R, 0, 255, 0, adc_pcm_tlv), SOC_DOUBLE_TLV("Capture Sidetone Volume", WM8350_ADC_DIVIDER, 8, 4, 15, 1, capture_sd_tlv), SOC_WM8350_DOUBLE_R_TLV("Capture Volume", WM8350_LEFT_INPUT_VOLUME, WM8350_RIGHT_INPUT_VOLUME, 2, 63, 0, pre_amp_tlv), SOC_DOUBLE_R("Capture ZC Switch", WM8350_LEFT_INPUT_VOLUME, WM8350_RIGHT_INPUT_VOLUME, 13, 1, 0), SOC_SINGLE_TLV("Left Input Left Sidetone Volume", WM8350_OUTPUT_LEFT_MIXER_VOLUME, 1, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("Left Input Right Sidetone Volume", WM8350_OUTPUT_LEFT_MIXER_VOLUME, 5, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("Left Input Bypass Volume", WM8350_OUTPUT_LEFT_MIXER_VOLUME, 9, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("Right Input Left Sidetone Volume", WM8350_OUTPUT_RIGHT_MIXER_VOLUME, 1, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("Right Input Right Sidetone Volume", WM8350_OUTPUT_RIGHT_MIXER_VOLUME, 5, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("Right Input Bypass Volume", WM8350_OUTPUT_RIGHT_MIXER_VOLUME, 13, 7, 0, out_mix_tlv), SOC_SINGLE("Left Input Mixer +20dB Switch", WM8350_INPUT_MIXER_VOLUME_L, 0, 1, 0), SOC_SINGLE("Right Input Mixer +20dB Switch", WM8350_INPUT_MIXER_VOLUME_R, 0, 1, 0), SOC_SINGLE_TLV("Out4 Capture Volume", WM8350_INPUT_MIXER_VOLUME, 1, 7, 0, out_mix_tlv), SOC_WM8350_DOUBLE_R_TLV("Out1 Playback Volume", WM8350_LOUT1_VOLUME, WM8350_ROUT1_VOLUME, 2, 63, 0, out_pga_tlv), SOC_DOUBLE_R("Out1 Playback ZC Switch", 
WM8350_LOUT1_VOLUME, WM8350_ROUT1_VOLUME, 13, 1, 0), SOC_WM8350_DOUBLE_R_TLV("Out2 Playback Volume", WM8350_LOUT2_VOLUME, WM8350_ROUT2_VOLUME, 2, 63, 0, out_pga_tlv), SOC_DOUBLE_R("Out2 Playback ZC Switch", WM8350_LOUT2_VOLUME, WM8350_ROUT2_VOLUME, 13, 1, 0), SOC_SINGLE("Out2 Right Invert Switch", WM8350_ROUT2_VOLUME, 10, 1, 0), SOC_SINGLE_TLV("Out2 Beep Volume", WM8350_BEEP_VOLUME, 5, 7, 0, out_mix_tlv), SOC_DOUBLE_R("Out1 Playback Switch", WM8350_LOUT1_VOLUME, WM8350_ROUT1_VOLUME, 14, 1, 1), SOC_DOUBLE_R("Out2 Playback Switch", WM8350_LOUT2_VOLUME, WM8350_ROUT2_VOLUME, 14, 1, 1), }; /* * DAPM Controls */ /* Left Playback Mixer */ static const struct snd_kcontrol_new wm8350_left_play_mixer_controls[] = { SOC_DAPM_SINGLE("Playback Switch", WM8350_LEFT_MIXER_CONTROL, 11, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8350_LEFT_MIXER_CONTROL, 2, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8350_LEFT_MIXER_CONTROL, 12, 1, 0), SOC_DAPM_SINGLE("Left Sidetone Switch", WM8350_LEFT_MIXER_CONTROL, 0, 1, 0), SOC_DAPM_SINGLE("Right Sidetone Switch", WM8350_LEFT_MIXER_CONTROL, 1, 1, 0), }; /* Right Playback Mixer */ static const struct snd_kcontrol_new wm8350_right_play_mixer_controls[] = { SOC_DAPM_SINGLE("Playback Switch", WM8350_RIGHT_MIXER_CONTROL, 12, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8350_RIGHT_MIXER_CONTROL, 3, 1, 0), SOC_DAPM_SINGLE("Left Playback Switch", WM8350_RIGHT_MIXER_CONTROL, 11, 1, 0), SOC_DAPM_SINGLE("Left Sidetone Switch", WM8350_RIGHT_MIXER_CONTROL, 0, 1, 0), SOC_DAPM_SINGLE("Right Sidetone Switch", WM8350_RIGHT_MIXER_CONTROL, 1, 1, 0), }; /* Out4 Mixer */ static const struct snd_kcontrol_new wm8350_out4_mixer_controls[] = { SOC_DAPM_SINGLE("Right Playback Switch", WM8350_OUT4_MIXER_CONTROL, 12, 1, 0), SOC_DAPM_SINGLE("Left Playback Switch", WM8350_OUT4_MIXER_CONTROL, 11, 1, 0), SOC_DAPM_SINGLE("Right Capture Switch", WM8350_OUT4_MIXER_CONTROL, 9, 1, 0), SOC_DAPM_SINGLE("Out3 Playback Switch", WM8350_OUT4_MIXER_CONTROL, 2, 1, 0), 
SOC_DAPM_SINGLE("Right Mixer Switch", WM8350_OUT4_MIXER_CONTROL, 1, 1, 0), SOC_DAPM_SINGLE("Left Mixer Switch", WM8350_OUT4_MIXER_CONTROL, 0, 1, 0), }; /* Out3 Mixer */ static const struct snd_kcontrol_new wm8350_out3_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8350_OUT3_MIXER_CONTROL, 11, 1, 0), SOC_DAPM_SINGLE("Left Capture Switch", WM8350_OUT3_MIXER_CONTROL, 8, 1, 0), SOC_DAPM_SINGLE("Out4 Playback Switch", WM8350_OUT3_MIXER_CONTROL, 3, 1, 0), SOC_DAPM_SINGLE("Left Mixer Switch", WM8350_OUT3_MIXER_CONTROL, 0, 1, 0), }; /* Left Input Mixer */ static const struct snd_kcontrol_new wm8350_left_capt_mixer_controls[] = { SOC_DAPM_SINGLE_TLV("L2 Capture Volume", WM8350_INPUT_MIXER_VOLUME_L, 1, 7, 0, out_mix_tlv), SOC_DAPM_SINGLE_TLV("L3 Capture Volume", WM8350_INPUT_MIXER_VOLUME_L, 9, 7, 0, out_mix_tlv), SOC_DAPM_SINGLE("PGA Capture Switch", WM8350_LEFT_INPUT_VOLUME, 14, 1, 1), }; /* Right Input Mixer */ static const struct snd_kcontrol_new wm8350_right_capt_mixer_controls[] = { SOC_DAPM_SINGLE_TLV("L2 Capture Volume", WM8350_INPUT_MIXER_VOLUME_R, 5, 7, 0, out_mix_tlv), SOC_DAPM_SINGLE_TLV("L3 Capture Volume", WM8350_INPUT_MIXER_VOLUME_R, 13, 7, 0, out_mix_tlv), SOC_DAPM_SINGLE("PGA Capture Switch", WM8350_RIGHT_INPUT_VOLUME, 14, 1, 1), }; /* Left Mic Mixer */ static const struct snd_kcontrol_new wm8350_left_mic_mixer_controls[] = { SOC_DAPM_SINGLE("INN Capture Switch", WM8350_INPUT_CONTROL, 1, 1, 0), SOC_DAPM_SINGLE("INP Capture Switch", WM8350_INPUT_CONTROL, 0, 1, 0), SOC_DAPM_SINGLE("IN2 Capture Switch", WM8350_INPUT_CONTROL, 2, 1, 0), }; /* Right Mic Mixer */ static const struct snd_kcontrol_new wm8350_right_mic_mixer_controls[] = { SOC_DAPM_SINGLE("INN Capture Switch", WM8350_INPUT_CONTROL, 9, 1, 0), SOC_DAPM_SINGLE("INP Capture Switch", WM8350_INPUT_CONTROL, 8, 1, 0), SOC_DAPM_SINGLE("IN2 Capture Switch", WM8350_INPUT_CONTROL, 10, 1, 0), }; /* Beep Switch */ static const struct snd_kcontrol_new wm8350_beep_switch_controls = 
SOC_DAPM_SINGLE("Switch", WM8350_BEEP_VOLUME, 15, 1, 1); /* Out4 Capture Mux */ static const struct snd_kcontrol_new wm8350_out4_capture_controls = SOC_DAPM_ENUM("Route", wm8350_enum[7]); static const struct snd_soc_dapm_widget wm8350_dapm_widgets[] = { SND_SOC_DAPM_PGA("IN3R PGA", WM8350_POWER_MGMT_2, 11, 0, NULL, 0), SND_SOC_DAPM_PGA("IN3L PGA", WM8350_POWER_MGMT_2, 10, 0, NULL, 0), SND_SOC_DAPM_PGA_E("Right Out2 PGA", WM8350_POWER_MGMT_3, 3, 0, NULL, 0, pga_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_PGA_E("Left Out2 PGA", WM8350_POWER_MGMT_3, 2, 0, NULL, 0, pga_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_PGA_E("Right Out1 PGA", WM8350_POWER_MGMT_3, 1, 0, NULL, 0, pga_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_PGA_E("Left Out1 PGA", WM8350_POWER_MGMT_3, 0, 0, NULL, 0, pga_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_MIXER("Right Capture Mixer", WM8350_POWER_MGMT_2, 7, 0, &wm8350_right_capt_mixer_controls[0], ARRAY_SIZE(wm8350_right_capt_mixer_controls)), SND_SOC_DAPM_MIXER("Left Capture Mixer", WM8350_POWER_MGMT_2, 6, 0, &wm8350_left_capt_mixer_controls[0], ARRAY_SIZE(wm8350_left_capt_mixer_controls)), SND_SOC_DAPM_MIXER("Out4 Mixer", WM8350_POWER_MGMT_2, 5, 0, &wm8350_out4_mixer_controls[0], ARRAY_SIZE(wm8350_out4_mixer_controls)), SND_SOC_DAPM_MIXER("Out3 Mixer", WM8350_POWER_MGMT_2, 4, 0, &wm8350_out3_mixer_controls[0], ARRAY_SIZE(wm8350_out3_mixer_controls)), SND_SOC_DAPM_MIXER("Right Playback Mixer", WM8350_POWER_MGMT_2, 1, 0, &wm8350_right_play_mixer_controls[0], ARRAY_SIZE(wm8350_right_play_mixer_controls)), SND_SOC_DAPM_MIXER("Left Playback Mixer", WM8350_POWER_MGMT_2, 0, 0, &wm8350_left_play_mixer_controls[0], ARRAY_SIZE(wm8350_left_play_mixer_controls)), SND_SOC_DAPM_MIXER("Left Mic Mixer", WM8350_POWER_MGMT_2, 8, 0, &wm8350_left_mic_mixer_controls[0], ARRAY_SIZE(wm8350_left_mic_mixer_controls)), SND_SOC_DAPM_MIXER("Right Mic Mixer", WM8350_POWER_MGMT_2, 9, 0, 
&wm8350_right_mic_mixer_controls[0], ARRAY_SIZE(wm8350_right_mic_mixer_controls)), /* virtual mixer for Beep and Out2R */ SND_SOC_DAPM_MIXER("Out2 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_SWITCH("Beep", WM8350_POWER_MGMT_3, 7, 0, &wm8350_beep_switch_controls), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8350_POWER_MGMT_4, 3, 0), SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8350_POWER_MGMT_4, 2, 0), SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8350_POWER_MGMT_4, 5, 0), SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8350_POWER_MGMT_4, 4, 0), SND_SOC_DAPM_MICBIAS("Mic Bias", WM8350_POWER_MGMT_1, 4, 0), SND_SOC_DAPM_MUX("Out4 Capture Channel", SND_SOC_NOPM, 0, 0, &wm8350_out4_capture_controls), SND_SOC_DAPM_OUTPUT("OUT1R"), SND_SOC_DAPM_OUTPUT("OUT1L"), SND_SOC_DAPM_OUTPUT("OUT2R"), SND_SOC_DAPM_OUTPUT("OUT2L"), SND_SOC_DAPM_OUTPUT("OUT3"), SND_SOC_DAPM_OUTPUT("OUT4"), SND_SOC_DAPM_INPUT("IN1RN"), SND_SOC_DAPM_INPUT("IN1RP"), SND_SOC_DAPM_INPUT("IN2R"), SND_SOC_DAPM_INPUT("IN1LP"), SND_SOC_DAPM_INPUT("IN1LN"), SND_SOC_DAPM_INPUT("IN2L"), SND_SOC_DAPM_INPUT("IN3R"), SND_SOC_DAPM_INPUT("IN3L"), }; static const struct snd_soc_dapm_route audio_map[] = { /* left playback mixer */ {"Left Playback Mixer", "Playback Switch", "Left DAC"}, {"Left Playback Mixer", "Left Bypass Switch", "IN3L PGA"}, {"Left Playback Mixer", "Right Playback Switch", "Right DAC"}, {"Left Playback Mixer", "Left Sidetone Switch", "Left Mic Mixer"}, {"Left Playback Mixer", "Right Sidetone Switch", "Right Mic Mixer"}, /* right playback mixer */ {"Right Playback Mixer", "Playback Switch", "Right DAC"}, {"Right Playback Mixer", "Right Bypass Switch", "IN3R PGA"}, {"Right Playback Mixer", "Left Playback Switch", "Left DAC"}, {"Right Playback Mixer", "Left Sidetone Switch", "Left Mic Mixer"}, {"Right Playback Mixer", "Right Sidetone Switch", "Right Mic Mixer"}, /* out4 playback mixer */ {"Out4 Mixer", "Right Playback Switch", "Right DAC"}, {"Out4 Mixer", "Left Playback Switch", "Left 
DAC"}, {"Out4 Mixer", "Right Capture Switch", "Right Capture Mixer"}, {"Out4 Mixer", "Out3 Playback Switch", "Out3 Mixer"}, {"Out4 Mixer", "Right Mixer Switch", "Right Playback Mixer"}, {"Out4 Mixer", "Left Mixer Switch", "Left Playback Mixer"}, {"OUT4", NULL, "Out4 Mixer"}, /* out3 playback mixer */ {"Out3 Mixer", "Left Playback Switch", "Left DAC"}, {"Out3 Mixer", "Left Capture Switch", "Left Capture Mixer"}, {"Out3 Mixer", "Left Mixer Switch", "Left Playback Mixer"}, {"Out3 Mixer", "Out4 Playback Switch", "Out4 Mixer"}, {"OUT3", NULL, "Out3 Mixer"}, /* out2 */ {"Right Out2 PGA", NULL, "Right Playback Mixer"}, {"Left Out2 PGA", NULL, "Left Playback Mixer"}, {"OUT2L", NULL, "Left Out2 PGA"}, {"OUT2R", NULL, "Right Out2 PGA"}, /* out1 */ {"Right Out1 PGA", NULL, "Right Playback Mixer"}, {"Left Out1 PGA", NULL, "Left Playback Mixer"}, {"OUT1L", NULL, "Left Out1 PGA"}, {"OUT1R", NULL, "Right Out1 PGA"}, /* ADCs */ {"Left ADC", NULL, "Left Capture Mixer"}, {"Right ADC", NULL, "Right Capture Mixer"}, /* Left capture mixer */ {"Left Capture Mixer", "L2 Capture Volume", "IN2L"}, {"Left Capture Mixer", "L3 Capture Volume", "IN3L PGA"}, {"Left Capture Mixer", "PGA Capture Switch", "Left Mic Mixer"}, {"Left Capture Mixer", NULL, "Out4 Capture Channel"}, /* Right capture mixer */ {"Right Capture Mixer", "L2 Capture Volume", "IN2R"}, {"Right Capture Mixer", "L3 Capture Volume", "IN3R PGA"}, {"Right Capture Mixer", "PGA Capture Switch", "Right Mic Mixer"}, {"Right Capture Mixer", NULL, "Out4 Capture Channel"}, /* L3 Inputs */ {"IN3L PGA", NULL, "IN3L"}, {"IN3R PGA", NULL, "IN3R"}, /* Left Mic mixer */ {"Left Mic Mixer", "INN Capture Switch", "IN1LN"}, {"Left Mic Mixer", "INP Capture Switch", "IN1LP"}, {"Left Mic Mixer", "IN2 Capture Switch", "IN2L"}, /* Right Mic mixer */ {"Right Mic Mixer", "INN Capture Switch", "IN1RN"}, {"Right Mic Mixer", "INP Capture Switch", "IN1RP"}, {"Right Mic Mixer", "IN2 Capture Switch", "IN2R"}, /* out 4 capture */ {"Out4 Capture Channel", NULL, 
"Out4 Mixer"}, /* Beep */ {"Beep", NULL, "IN3R PGA"}, }; static int wm8350_add_widgets(struct snd_soc_codec *codec) { int ret; ret = snd_soc_dapm_new_controls(codec, wm8350_dapm_widgets, ARRAY_SIZE(wm8350_dapm_widgets)); if (ret != 0) { dev_err(codec->dev, "dapm control register failed\n"); return ret; } /* set up audio paths */ ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); if (ret != 0) { dev_err(codec->dev, "DAPM route register failed\n"); return ret; } return snd_soc_dapm_new_widgets(codec); } static int wm8350_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8350 *wm8350 = codec->control_data; u16 fll_4; switch (clk_id) { case WM8350_MCLK_SEL_MCLK: wm8350_clear_bits(wm8350, WM8350_CLOCK_CONTROL_1, WM8350_MCLK_SEL); break; case WM8350_MCLK_SEL_PLL_MCLK: case WM8350_MCLK_SEL_PLL_DAC: case WM8350_MCLK_SEL_PLL_ADC: case WM8350_MCLK_SEL_PLL_32K: wm8350_set_bits(wm8350, WM8350_CLOCK_CONTROL_1, WM8350_MCLK_SEL); fll_4 = wm8350_codec_read(codec, WM8350_FLL_CONTROL_4) & ~WM8350_FLL_CLK_SRC_MASK; wm8350_codec_write(codec, WM8350_FLL_CONTROL_4, fll_4 | clk_id); break; } /* MCLK direction */ if (dir == WM8350_MCLK_DIR_OUT) wm8350_set_bits(wm8350, WM8350_CLOCK_CONTROL_2, WM8350_MCLK_DIR); else wm8350_clear_bits(wm8350, WM8350_CLOCK_CONTROL_2, WM8350_MCLK_DIR); return 0; } static int wm8350_set_clkdiv(struct snd_soc_dai *codec_dai, int div_id, int div) { struct snd_soc_codec *codec = codec_dai->codec; u16 val; switch (div_id) { case WM8350_ADC_CLKDIV: val = wm8350_codec_read(codec, WM8350_ADC_DIVIDER) & ~WM8350_ADC_CLKDIV_MASK; wm8350_codec_write(codec, WM8350_ADC_DIVIDER, val | div); break; case WM8350_DAC_CLKDIV: val = wm8350_codec_read(codec, WM8350_DAC_CLOCK_CONTROL) & ~WM8350_DAC_CLKDIV_MASK; wm8350_codec_write(codec, WM8350_DAC_CLOCK_CONTROL, val | div); break; case WM8350_BCLK_CLKDIV: val = wm8350_codec_read(codec, WM8350_CLOCK_CONTROL_1) & 
~WM8350_BCLK_DIV_MASK; wm8350_codec_write(codec, WM8350_CLOCK_CONTROL_1, val | div); break; case WM8350_OPCLK_CLKDIV: val = wm8350_codec_read(codec, WM8350_CLOCK_CONTROL_1) & ~WM8350_OPCLK_DIV_MASK; wm8350_codec_write(codec, WM8350_CLOCK_CONTROL_1, val | div); break; case WM8350_SYS_CLKDIV: val = wm8350_codec_read(codec, WM8350_CLOCK_CONTROL_1) & ~WM8350_MCLK_DIV_MASK; wm8350_codec_write(codec, WM8350_CLOCK_CONTROL_1, val | div); break; case WM8350_DACLR_CLKDIV: val = wm8350_codec_read(codec, WM8350_DAC_LR_RATE) & ~WM8350_DACLRC_RATE_MASK; wm8350_codec_write(codec, WM8350_DAC_LR_RATE, val | div); break; case WM8350_ADCLR_CLKDIV: val = wm8350_codec_read(codec, WM8350_ADC_LR_RATE) & ~WM8350_ADCLRC_RATE_MASK; wm8350_codec_write(codec, WM8350_ADC_LR_RATE, val | div); break; default: return -EINVAL; } return 0; } static int wm8350_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = wm8350_codec_read(codec, WM8350_AI_FORMATING) & ~(WM8350_AIF_BCLK_INV | WM8350_AIF_LRCLK_INV | WM8350_AIF_FMT_MASK); u16 master = wm8350_codec_read(codec, WM8350_AI_DAC_CONTROL) & ~WM8350_BCLK_MSTR; u16 dac_lrc = wm8350_codec_read(codec, WM8350_DAC_LR_RATE) & ~WM8350_DACLRC_ENA; u16 adc_lrc = wm8350_codec_read(codec, WM8350_ADC_LR_RATE) & ~WM8350_ADCLRC_ENA; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: master |= WM8350_BCLK_MSTR; dac_lrc |= WM8350_DACLRC_ENA; adc_lrc |= WM8350_ADCLRC_ENA; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= 0x2 << 8; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: iface |= 0x1 << 8; break; case SND_SOC_DAIFMT_DSP_A: iface |= 0x3 << 8; break; case SND_SOC_DAIFMT_DSP_B: iface |= 0x3 << 8 | WM8350_AIF_LRCLK_INV; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & 
SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: iface |= WM8350_AIF_LRCLK_INV | WM8350_AIF_BCLK_INV; break; case SND_SOC_DAIFMT_IB_NF: iface |= WM8350_AIF_BCLK_INV; break; case SND_SOC_DAIFMT_NB_IF: iface |= WM8350_AIF_LRCLK_INV; break; default: return -EINVAL; } wm8350_codec_write(codec, WM8350_AI_FORMATING, iface); wm8350_codec_write(codec, WM8350_AI_DAC_CONTROL, master); wm8350_codec_write(codec, WM8350_DAC_LR_RATE, dac_lrc); wm8350_codec_write(codec, WM8350_ADC_LR_RATE, adc_lrc); return 0; } static int wm8350_pcm_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *codec_dai) { struct snd_soc_codec *codec = codec_dai->codec; int master = wm8350_codec_cache_read(codec, WM8350_AI_DAC_CONTROL) & WM8350_BCLK_MSTR; int enabled = 0; /* Check that the DACs or ADCs are enabled since they are * required for LRC in master mode. The DACs or ADCs need a * valid audio path i.e. pin -> ADC or DAC -> pin before * the LRC will be enabled in master mode. 
*/ if (!master || cmd != SNDRV_PCM_TRIGGER_START) return 0; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { enabled = wm8350_codec_cache_read(codec, WM8350_POWER_MGMT_4) & (WM8350_ADCR_ENA | WM8350_ADCL_ENA); } else { enabled = wm8350_codec_cache_read(codec, WM8350_POWER_MGMT_4) & (WM8350_DACR_ENA | WM8350_DACL_ENA); } if (!enabled) { dev_err(codec->dev, "%s: invalid audio path - no clocks available\n", __func__); return -EINVAL; } return 0; } static int wm8350_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *codec_dai) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8350 *wm8350 = codec->control_data; u16 iface = wm8350_codec_read(codec, WM8350_AI_FORMATING) & ~WM8350_AIF_WL_MASK; /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: iface |= 0x1 << 10; break; case SNDRV_PCM_FORMAT_S24_LE: iface |= 0x2 << 10; break; case SNDRV_PCM_FORMAT_S32_LE: iface |= 0x3 << 10; break; } wm8350_codec_write(codec, WM8350_AI_FORMATING, iface); /* The sloping stopband filter is recommended for use with * lower sample rates to improve performance. 
*/ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (params_rate(params) < 24000) wm8350_set_bits(wm8350, WM8350_DAC_MUTE_VOLUME, WM8350_DAC_SB_FILT); else wm8350_clear_bits(wm8350, WM8350_DAC_MUTE_VOLUME, WM8350_DAC_SB_FILT); } return 0; } static int wm8350_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; struct wm8350 *wm8350 = codec->control_data; if (mute) wm8350_set_bits(wm8350, WM8350_DAC_MUTE, WM8350_DAC_MUTE_ENA); else wm8350_clear_bits(wm8350, WM8350_DAC_MUTE, WM8350_DAC_MUTE_ENA); return 0; } /* FLL divisors */ struct _fll_div { int div; /* FLL_OUTDIV */ int n; int k; int ratio; /* FLL_FRATIO */ }; /* The size in bits of the fll divide multiplied by 10 * to allow rounding later */ #define FIXED_FLL_SIZE ((1 << 16) * 10) static inline int fll_factors(struct _fll_div *fll_div, unsigned int input, unsigned int output) { u64 Kpart; unsigned int t1, t2, K, Nmod; if (output >= 2815250 && output <= 3125000) fll_div->div = 0x4; else if (output >= 5625000 && output <= 6250000) fll_div->div = 0x3; else if (output >= 11250000 && output <= 12500000) fll_div->div = 0x2; else if (output >= 22500000 && output <= 25000000) fll_div->div = 0x1; else { printk(KERN_ERR "wm8350: fll freq %d out of range\n", output); return -EINVAL; } if (input > 48000) fll_div->ratio = 1; else fll_div->ratio = 8; t1 = output * (1 << (fll_div->div + 1)); t2 = input * fll_div->ratio; fll_div->n = t1 / t2; Nmod = t1 % t2; if (Nmod) { Kpart = FIXED_FLL_SIZE * (long long)Nmod; do_div(Kpart, t2); K = Kpart & 0xFFFFFFFF; /* Check if we need to round */ if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ K /= 10; fll_div->k = K; } else fll_div->k = 0; return 0; } static int wm8350_set_fll(struct snd_soc_dai *codec_dai, int pll_id, unsigned int freq_in, unsigned int freq_out) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8350 *wm8350 = codec->control_data; struct wm8350_data *priv = codec->private_data; struct _fll_div 
fll_div; int ret = 0; u16 fll_1, fll_4; if (freq_in == priv->fll_freq_in && freq_out == priv->fll_freq_out) return 0; /* power down FLL - we need to do this for reconfiguration */ wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_FLL_ENA | WM8350_FLL_OSC_ENA); if (freq_out == 0 || freq_in == 0) return ret; ret = fll_factors(&fll_div, freq_in, freq_out); if (ret < 0) return ret; dev_dbg(wm8350->dev, "FLL in %u FLL out %u N 0x%x K 0x%x div %d ratio %d", freq_in, freq_out, fll_div.n, fll_div.k, fll_div.div, fll_div.ratio); /* set up N.K & dividers */ fll_1 = wm8350_codec_read(codec, WM8350_FLL_CONTROL_1) & ~(WM8350_FLL_OUTDIV_MASK | WM8350_FLL_RSP_RATE_MASK | 0xc000); wm8350_codec_write(codec, WM8350_FLL_CONTROL_1, fll_1 | (fll_div.div << 8) | 0x50); wm8350_codec_write(codec, WM8350_FLL_CONTROL_2, (fll_div.ratio << 11) | (fll_div. n & WM8350_FLL_N_MASK)); wm8350_codec_write(codec, WM8350_FLL_CONTROL_3, fll_div.k); fll_4 = wm8350_codec_read(codec, WM8350_FLL_CONTROL_4) & ~(WM8350_FLL_FRAC | WM8350_FLL_SLOW_LOCK_REF); wm8350_codec_write(codec, WM8350_FLL_CONTROL_4, fll_4 | (fll_div.k ? WM8350_FLL_FRAC : 0) | (fll_div.ratio == 8 ? 
WM8350_FLL_SLOW_LOCK_REF : 0)); /* power FLL on */ wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_FLL_OSC_ENA); wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_FLL_ENA); priv->fll_freq_out = freq_out; priv->fll_freq_in = freq_in; return 0; } static int wm8350_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm8350 *wm8350 = codec->control_data; struct wm8350_data *priv = codec->private_data; struct wm8350_audio_platform_data *platform = wm8350->codec.platform_data; u16 pm1; int ret; switch (level) { case SND_SOC_BIAS_ON: pm1 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_1) & ~(WM8350_VMID_MASK | WM8350_CODEC_ISEL_MASK); wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1 | WM8350_VMID_50K | platform->codec_current_on << 14); break; case SND_SOC_BIAS_PREPARE: pm1 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_1); pm1 &= ~WM8350_VMID_MASK; wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1 | WM8350_VMID_50K); break; case SND_SOC_BIAS_STANDBY: if (codec->bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies); if (ret != 0) return ret; /* Enable the system clock */ wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_SYSCLK_ENA); /* mute DAC & outputs */ wm8350_set_bits(wm8350, WM8350_DAC_MUTE, WM8350_DAC_MUTE_ENA); /* discharge cap memory */ wm8350_reg_write(wm8350, WM8350_ANTI_POP_CONTROL, platform->dis_out1 | (platform->dis_out2 << 2) | (platform->dis_out3 << 4) | (platform->dis_out4 << 6)); /* wait for discharge */ schedule_timeout_interruptible(msecs_to_jiffies (platform-> cap_discharge_msecs)); /* enable antipop */ wm8350_reg_write(wm8350, WM8350_ANTI_POP_CONTROL, (platform->vmid_s_curve << 8)); /* ramp up vmid */ wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, (platform-> codec_current_charge << 14) | WM8350_VMID_5K | WM8350_VMIDEN | WM8350_VBUFEN); /* wait for vmid */ schedule_timeout_interruptible(msecs_to_jiffies (platform-> vmid_charge_msecs)); /* turn on vmid 300k */ pm1 = 
wm8350_reg_read(wm8350, WM8350_POWER_MGMT_1) & ~(WM8350_VMID_MASK | WM8350_CODEC_ISEL_MASK); pm1 |= WM8350_VMID_300K | (platform->codec_current_standby << 14); wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1); /* enable analogue bias */ pm1 |= WM8350_BIASEN; wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1); /* disable antipop */ wm8350_reg_write(wm8350, WM8350_ANTI_POP_CONTROL, 0); } else { /* turn on vmid 300k and reduce current */ pm1 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_1) & ~(WM8350_VMID_MASK | WM8350_CODEC_ISEL_MASK); wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1 | WM8350_VMID_300K | (platform-> codec_current_standby << 14)); } break; case SND_SOC_BIAS_OFF: /* mute DAC & enable outputs */ wm8350_set_bits(wm8350, WM8350_DAC_MUTE, WM8350_DAC_MUTE_ENA); wm8350_set_bits(wm8350, WM8350_POWER_MGMT_3, WM8350_OUT1L_ENA | WM8350_OUT1R_ENA | WM8350_OUT2L_ENA | WM8350_OUT2R_ENA); /* enable anti pop S curve */ wm8350_reg_write(wm8350, WM8350_ANTI_POP_CONTROL, (platform->vmid_s_curve << 8)); /* turn off vmid */ pm1 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_1) & ~WM8350_VMIDEN; wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1); /* wait */ schedule_timeout_interruptible(msecs_to_jiffies (platform-> vmid_discharge_msecs)); wm8350_reg_write(wm8350, WM8350_ANTI_POP_CONTROL, (platform->vmid_s_curve << 8) | platform->dis_out1 | (platform->dis_out2 << 2) | (platform->dis_out3 << 4) | (platform->dis_out4 << 6)); /* turn off VBuf and drain */ pm1 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_1) & ~(WM8350_VBUFEN | WM8350_VMID_MASK); wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1 | WM8350_OUTPUT_DRAIN_EN); /* wait */ schedule_timeout_interruptible(msecs_to_jiffies (platform->drain_msecs)); pm1 &= ~WM8350_BIASEN; wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1); /* disable anti-pop */ wm8350_reg_write(wm8350, WM8350_ANTI_POP_CONTROL, 0); wm8350_clear_bits(wm8350, WM8350_LOUT1_VOLUME, WM8350_OUT1L_ENA); wm8350_clear_bits(wm8350, WM8350_ROUT1_VOLUME, 
WM8350_OUT1R_ENA); wm8350_clear_bits(wm8350, WM8350_LOUT2_VOLUME, WM8350_OUT2L_ENA); wm8350_clear_bits(wm8350, WM8350_ROUT2_VOLUME, WM8350_OUT2R_ENA); /* disable clock gen */ wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_SYSCLK_ENA); regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies); break; } codec->bias_level = level; return 0; } static int wm8350_suspend(struct platform_device *pdev, pm_message_t state) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec = socdev->card->codec; wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8350_resume(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec = socdev->card->codec; wm8350_set_bias_level(codec, SND_SOC_BIAS_STANDBY); if (codec->suspend_bias_level == SND_SOC_BIAS_ON) wm8350_set_bias_level(codec, SND_SOC_BIAS_ON); return 0; } static void wm8350_hp_jack_handler(struct wm8350 *wm8350, int irq, void *data) { struct wm8350_data *priv = data; u16 reg; int report; int mask; struct wm8350_jack_data *jack = NULL; switch (irq) { case WM8350_IRQ_CODEC_JCK_DET_L: jack = &priv->hpl; mask = WM8350_JACK_L_LVL; break; case WM8350_IRQ_CODEC_JCK_DET_R: jack = &priv->hpr; mask = WM8350_JACK_R_LVL; break; default: BUG(); } if (!jack->jack) { dev_warn(wm8350->dev, "Jack interrupt called with no jack\n"); return; } /* Debounce */ msleep(200); reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS); if (reg & mask) report = jack->report; else report = 0; snd_soc_jack_report(jack->jack, report, jack->report); } /** * wm8350_hp_jack_detect - Enable headphone jack detection. * * @codec: WM8350 codec * @which: left or right jack detect signal * @jack: jack to report detection events on * @report: value to report * * Enables the headphone jack detection of the WM8350. 
*/ int wm8350_hp_jack_detect(struct snd_soc_codec *codec, enum wm8350_jack which, struct snd_soc_jack *jack, int report) { struct wm8350_data *priv = codec->private_data; struct wm8350 *wm8350 = codec->control_data; int irq; int ena; switch (which) { case WM8350_JDL: priv->hpl.jack = jack; priv->hpl.report = report; irq = WM8350_IRQ_CODEC_JCK_DET_L; ena = WM8350_JDL_ENA; break; case WM8350_JDR: priv->hpr.jack = jack; priv->hpr.report = report; irq = WM8350_IRQ_CODEC_JCK_DET_R; ena = WM8350_JDR_ENA; break; default: return -EINVAL; } wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_TOCLK_ENA); wm8350_set_bits(wm8350, WM8350_JACK_DETECT, ena); /* Sync status */ wm8350_hp_jack_handler(wm8350, irq, priv); wm8350_unmask_irq(wm8350, irq); return 0; } EXPORT_SYMBOL_GPL(wm8350_hp_jack_detect); static struct snd_soc_codec *wm8350_codec; static int wm8350_probe(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec; struct wm8350 *wm8350; struct wm8350_data *priv; int ret; struct wm8350_output *out1; struct wm8350_output *out2; BUG_ON(!wm8350_codec); socdev->card->codec = wm8350_codec; codec = socdev->card->codec; wm8350 = codec->control_data; priv = codec->private_data; /* Enable the codec */ wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA); /* Enable robust clocking mode in ADC */ wm8350_codec_write(codec, WM8350_SECURITY, 0xa7); wm8350_codec_write(codec, 0xde, 0x13); wm8350_codec_write(codec, WM8350_SECURITY, 0); /* read OUT1 & OUT2 volumes */ out1 = &priv->out1; out2 = &priv->out2; out1->left_vol = (wm8350_reg_read(wm8350, WM8350_LOUT1_VOLUME) & WM8350_OUT1L_VOL_MASK) >> WM8350_OUT1L_VOL_SHIFT; out1->right_vol = (wm8350_reg_read(wm8350, WM8350_ROUT1_VOLUME) & WM8350_OUT1R_VOL_MASK) >> WM8350_OUT1R_VOL_SHIFT; out2->left_vol = (wm8350_reg_read(wm8350, WM8350_LOUT2_VOLUME) & WM8350_OUT2L_VOL_MASK) >> WM8350_OUT1L_VOL_SHIFT; out2->right_vol = (wm8350_reg_read(wm8350, WM8350_ROUT2_VOLUME) & 
WM8350_OUT2R_VOL_MASK) >> WM8350_OUT1R_VOL_SHIFT; wm8350_reg_write(wm8350, WM8350_LOUT1_VOLUME, 0); wm8350_reg_write(wm8350, WM8350_ROUT1_VOLUME, 0); wm8350_reg_write(wm8350, WM8350_LOUT2_VOLUME, 0); wm8350_reg_write(wm8350, WM8350_ROUT2_VOLUME, 0); /* Latch VU bits & mute */ wm8350_set_bits(wm8350, WM8350_LOUT1_VOLUME, WM8350_OUT1_VU | WM8350_OUT1L_MUTE); wm8350_set_bits(wm8350, WM8350_LOUT2_VOLUME, WM8350_OUT2_VU | WM8350_OUT2L_MUTE); wm8350_set_bits(wm8350, WM8350_ROUT1_VOLUME, WM8350_OUT1_VU | WM8350_OUT1R_MUTE); wm8350_set_bits(wm8350, WM8350_ROUT2_VOLUME, WM8350_OUT2_VU | WM8350_OUT2R_MUTE); wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L); wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R); wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, wm8350_hp_jack_handler, priv); wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, wm8350_hp_jack_handler, priv); ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); if (ret < 0) { dev_err(&pdev->dev, "failed to create pcms\n"); return ret; } snd_soc_add_controls(codec, wm8350_snd_controls, ARRAY_SIZE(wm8350_snd_controls)); wm8350_add_widgets(codec); wm8350_set_bias_level(codec, SND_SOC_BIAS_STANDBY); ret = snd_soc_init_card(socdev); if (ret < 0) { dev_err(&pdev->dev, "failed to register card\n"); goto card_err; } return 0; card_err: snd_soc_free_pcms(socdev); snd_soc_dapm_free(socdev); return ret; } static int wm8350_remove(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec = socdev->card->codec; struct wm8350 *wm8350 = codec->control_data; struct wm8350_data *priv = codec->private_data; int ret; wm8350_clear_bits(wm8350, WM8350_JACK_DETECT, WM8350_JDL_ENA | WM8350_JDR_ENA); wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_TOCLK_ENA); wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L); wm8350_mask_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R); wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L); wm8350_free_irq(wm8350, 
WM8350_IRQ_CODEC_JCK_DET_R); priv->hpl.jack = NULL; priv->hpr.jack = NULL; /* cancel any work waiting to be queued. */ ret = cancel_delayed_work(&codec->delayed_work); /* if there was any work waiting then we run it now and * wait for its completion */ if (ret) { schedule_delayed_work(&codec->delayed_work, 0); flush_scheduled_work(); } wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF); wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA); return 0; } #define WM8350_RATES (SNDRV_PCM_RATE_8000_96000) #define WM8350_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static struct snd_soc_dai_ops wm8350_dai_ops = { .hw_params = wm8350_pcm_hw_params, .digital_mute = wm8350_mute, .trigger = wm8350_pcm_trigger, .set_fmt = wm8350_set_dai_fmt, .set_sysclk = wm8350_set_dai_sysclk, .set_pll = wm8350_set_fll, .set_clkdiv = wm8350_set_clkdiv, }; struct snd_soc_dai wm8350_dai = { .name = "WM8350", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8350_RATES, .formats = WM8350_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8350_RATES, .formats = WM8350_FORMATS, }, .ops = &wm8350_dai_ops, }; EXPORT_SYMBOL_GPL(wm8350_dai); struct snd_soc_codec_device soc_codec_dev_wm8350 = { .probe = wm8350_probe, .remove = wm8350_remove, .suspend = wm8350_suspend, .resume = wm8350_resume, }; EXPORT_SYMBOL_GPL(soc_codec_dev_wm8350); static __devinit int wm8350_codec_probe(struct platform_device *pdev) { struct wm8350 *wm8350 = platform_get_drvdata(pdev); struct wm8350_data *priv; struct snd_soc_codec *codec; int ret, i; if (wm8350->codec.platform_data == NULL) { dev_err(&pdev->dev, "No audio platform data supplied\n"); return -EINVAL; } priv = kzalloc(sizeof(struct wm8350_data), GFP_KERNEL); if (priv == NULL) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(supply_names); i++) priv->supplies[i].supply = supply_names[i]; ret = regulator_bulk_get(wm8350->dev, 
ARRAY_SIZE(priv->supplies), priv->supplies); if (ret != 0) goto err_priv; codec = &priv->codec; wm8350->codec.codec = codec; wm8350_dai.dev = &pdev->dev; mutex_init(&codec->mutex); INIT_LIST_HEAD(&codec->dapm_widgets); INIT_LIST_HEAD(&codec->dapm_paths); codec->dev = &pdev->dev; codec->name = "WM8350"; codec->owner = THIS_MODULE; codec->read = wm8350_codec_read; codec->write = wm8350_codec_write; codec->bias_level = SND_SOC_BIAS_OFF; codec->set_bias_level = wm8350_set_bias_level; codec->dai = &wm8350_dai; codec->num_dai = 1; codec->reg_cache_size = WM8350_MAX_REGISTER; codec->private_data = priv; codec->control_data = wm8350; /* Put the codec into reset if it wasn't already */ wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA); INIT_DELAYED_WORK(&codec->delayed_work, wm8350_pga_work); ret = snd_soc_register_codec(codec); if (ret != 0) goto err_supply; wm8350_codec = codec; ret = snd_soc_register_dai(&wm8350_dai); if (ret != 0) goto err_codec; return 0; err_codec: snd_soc_unregister_codec(codec); err_supply: regulator_bulk_free(ARRAY_SIZE(priv->supplies), priv->supplies); err_priv: kfree(priv); wm8350_codec = NULL; return ret; } static int __devexit wm8350_codec_remove(struct platform_device *pdev) { struct wm8350 *wm8350 = platform_get_drvdata(pdev); struct snd_soc_codec *codec = wm8350->codec.codec; struct wm8350_data *priv = codec->private_data; snd_soc_unregister_dai(&wm8350_dai); snd_soc_unregister_codec(codec); regulator_bulk_free(ARRAY_SIZE(priv->supplies), priv->supplies); kfree(priv); wm8350_codec = NULL; return 0; } #ifdef CONFIG_PM static int wm8350_codec_suspend(struct platform_device *pdev, pm_message_t m) { return snd_soc_suspend_device(&pdev->dev); } static int wm8350_codec_resume(struct platform_device *pdev) { return snd_soc_resume_device(&pdev->dev); } #else #define wm8350_codec_suspend NULL #define wm8350_codec_resume NULL #endif static struct platform_driver wm8350_codec_driver = { .driver = { .name = "wm8350-codec", .owner = 
THIS_MODULE, }, .probe = wm8350_codec_probe, .remove = __devexit_p(wm8350_codec_remove), .suspend = wm8350_codec_suspend, .resume = wm8350_codec_resume, }; static __init int wm8350_init(void) { return platform_driver_register(&wm8350_codec_driver); } module_init(wm8350_init); static __exit void wm8350_exit(void) { platform_driver_unregister(&wm8350_codec_driver); } module_exit(wm8350_exit); MODULE_DESCRIPTION("ASoC WM8350 driver"); MODULE_AUTHOR("Liam Girdwood"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm8350-codec");
gpl-2.0
HarveyHunt/linux
arch/sparc/kernel/sun4m_smp.c
603
6143
// SPDX-License-Identifier: GPL-2.0 /* * sun4m SMP support. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/profile.h> #include <linux/delay.h> #include <linux/sched/mm.h> #include <linux/cpu.h> #include <asm/cacheflush.h> #include <asm/switch_to.h> #include <asm/tlbflush.h> #include <asm/timer.h> #include <asm/oplib.h> #include "irq.h" #include "kernel.h" #define IRQ_IPI_SINGLE 12 #define IRQ_IPI_MASK 13 #define IRQ_IPI_RESCHED 14 #define IRQ_CROSS_CALL 15 static inline unsigned long swap_ulong(volatile unsigned long *ptr, unsigned long val) { __asm__ __volatile__("swap [%1], %0\n\t" : "=&r" (val), "=&r" (ptr) : "0" (val), "1" (ptr)); return val; } void sun4m_cpu_pre_starting(void *arg) { } void sun4m_cpu_pre_online(void *arg) { int cpuid = hard_smp_processor_id(); /* Allow master to continue. The master will then give us the * go-ahead by setting the smp_commenced_mask and will wait without * timeouts until our setup is completed fully (signified by * our bit being set in the cpu_online_mask). */ swap_ulong(&cpu_callin_map[cpuid], 1); /* XXX: What's up with all the flushes? */ local_ops->cache_all(); local_ops->tlb_all(); /* Fix idle thread fields. */ __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r" (&current_set[cpuid]) : "memory" /* paranoid */); /* Attach to the address space of init_task. */ mmgrab(&init_mm); current->active_mm = &init_mm; while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) mb(); } /* * Cycle through the processors asking the PROM to start each one. */ void __init smp4m_boot_cpus(void) { sun4m_unmask_profile_irq(); local_ops->cache_all(); } int smp4m_boot_one_cpu(int i, struct task_struct *idle) { unsigned long *entry = &sun4m_cpu_startup; int timeout; int cpu_node; cpu_find_by_mid(i, &cpu_node); current_set[i] = task_thread_info(idle); /* See trampoline.S for details... 
*/ entry += ((i - 1) * 3); /* * Initialize the contexts table * Since the call to prom_startcpu() trashes the structure, * we need to re-initialize it for each cpu */ smp_penguin_ctable.which_io = 0; smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys; smp_penguin_ctable.reg_size = 0; /* whirrr, whirrr, whirrrrrrrrr... */ printk(KERN_INFO "Starting CPU %d at %p\n", i, entry); local_ops->cache_all(); prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry); /* wheee... it's going... */ for (timeout = 0; timeout < 10000; timeout++) { if (cpu_callin_map[i]) break; udelay(200); } if (!(cpu_callin_map[i])) { printk(KERN_ERR "Processor %d is stuck.\n", i); return -ENODEV; } local_ops->cache_all(); return 0; } void __init smp4m_smp_done(void) { int i, first; int *prev; /* setup cpu list for irq rotation */ first = 0; prev = &first; for_each_online_cpu(i) { *prev = i; prev = &cpu_data(i).next; } *prev = first; local_ops->cache_all(); /* Ok, they are spinning and ready to go. */ } static void sun4m_send_ipi(int cpu, int level) { sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set); } static void sun4m_ipi_resched(int cpu) { sun4m_send_ipi(cpu, IRQ_IPI_RESCHED); } static void sun4m_ipi_single(int cpu) { sun4m_send_ipi(cpu, IRQ_IPI_SINGLE); } static void sun4m_ipi_mask_one(int cpu) { sun4m_send_ipi(cpu, IRQ_IPI_MASK); } static struct smp_funcall { smpfunc_t func; unsigned long arg1; unsigned long arg2; unsigned long arg3; unsigned long arg4; unsigned long arg5; unsigned long processors_in[SUN4M_NCPUS]; /* Set when ipi entered. */ unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */ } ccall_info; static DEFINE_SPINLOCK(cross_call_lock); /* Cross calls must be serialized, at least currently. 
*/ static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) { register int ncpus = SUN4M_NCPUS; unsigned long flags; spin_lock_irqsave(&cross_call_lock, flags); /* Init function glue. */ ccall_info.func = func; ccall_info.arg1 = arg1; ccall_info.arg2 = arg2; ccall_info.arg3 = arg3; ccall_info.arg4 = arg4; ccall_info.arg5 = 0; /* Init receive/complete mapping, plus fire the IPI's off. */ { register int i; cpumask_clear_cpu(smp_processor_id(), &mask); cpumask_and(&mask, cpu_online_mask, &mask); for (i = 0; i < ncpus; i++) { if (cpumask_test_cpu(i, &mask)) { ccall_info.processors_in[i] = 0; ccall_info.processors_out[i] = 0; sun4m_send_ipi(i, IRQ_CROSS_CALL); } else { ccall_info.processors_in[i] = 1; ccall_info.processors_out[i] = 1; } } } { register int i; i = 0; do { if (!cpumask_test_cpu(i, &mask)) continue; while (!ccall_info.processors_in[i]) barrier(); } while (++i < ncpus); i = 0; do { if (!cpumask_test_cpu(i, &mask)) continue; while (!ccall_info.processors_out[i]) barrier(); } while (++i < ncpus); } spin_unlock_irqrestore(&cross_call_lock, flags); } /* Running cross calls. */ void smp4m_cross_call_irq(void) { int i = smp_processor_id(); ccall_info.processors_in[i] = 1; ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4, ccall_info.arg5); ccall_info.processors_out[i] = 1; } void smp4m_percpu_timer_interrupt(struct pt_regs *regs) { struct pt_regs *old_regs; struct clock_event_device *ce; int cpu = smp_processor_id(); old_regs = set_irq_regs(regs); ce = &per_cpu(sparc32_clockevent, cpu); if (clockevent_state_periodic(ce)) sun4m_clear_profile_irq(cpu); else sparc_config.load_profile_irq(cpu, 0); /* Is this needless? 
*/ irq_enter(); ce->event_handler(ce); irq_exit(); set_irq_regs(old_regs); } static const struct sparc32_ipi_ops sun4m_ipi_ops = { .cross_call = sun4m_cross_call, .resched = sun4m_ipi_resched, .single = sun4m_ipi_single, .mask_one = sun4m_ipi_mask_one, }; void __init sun4m_init_smp(void) { sparc32_ipi_ops = &sun4m_ipi_ops; }
gpl-2.0
YogeshNain/linux
drivers/power/bq25890_charger.c
859
25502
/* * TI BQ25890 charger driver * * Copyright (C) 2015 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/power_supply.h> #include <linux/regmap.h> #include <linux/types.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/usb/phy.h> #include <linux/acpi.h> #include <linux/of.h> #define BQ25890_MANUFACTURER "Texas Instruments" #define BQ25890_IRQ_PIN "bq25890_irq" #define BQ25890_ID 3 enum bq25890_fields { F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */ F_BHOT, F_BCOLD, F_VINDPM_OFS, /* Reg01 */ F_CONV_START, F_CONV_RATE, F_BOOSTF, F_ICO_EN, F_HVDCP_EN, F_MAXC_EN, F_FORCE_DPM, F_AUTO_DPDM_EN, /* Reg02 */ F_BAT_LOAD_EN, F_WD_RST, F_OTG_CFG, F_CHG_CFG, F_SYSVMIN, /* Reg03 */ F_PUMPX_EN, F_ICHG, /* Reg04 */ F_IPRECHG, F_ITERM, /* Reg05 */ F_VREG, F_BATLOWV, F_VRECHG, /* Reg06 */ F_TERM_EN, F_STAT_DIS, F_WD, F_TMR_EN, F_CHG_TMR, F_JEITA_ISET, /* Reg07 */ F_BATCMP, F_VCLAMP, F_TREG, /* Reg08 */ F_FORCE_ICO, F_TMR2X_EN, F_BATFET_DIS, F_JEITA_VSET, F_BATFET_DLY, F_BATFET_RST_EN, F_PUMPX_UP, F_PUMPX_DN, /* Reg09 */ F_BOOSTV, F_BOOSTI, /* Reg0A */ F_VBUS_STAT, F_CHG_STAT, F_PG_STAT, F_SDP_STAT, F_VSYS_STAT, /* Reg0B */ F_WD_FAULT, F_BOOST_FAULT, F_CHG_FAULT, F_BAT_FAULT, F_NTC_FAULT, /* Reg0C */ F_FORCE_VINDPM, F_VINDPM, /* Reg0D */ F_THERM_STAT, F_BATV, /* Reg0E */ F_SYSV, /* Reg0F */ F_TSPCT, /* Reg10 */ F_VBUS_GD, F_VBUSV, /* Reg11 */ F_ICHGR, /* Reg12 */ F_VDPM_STAT, F_IDPM_STAT, F_IDPM_LIM, /* Reg13 */ 
F_REG_RST, F_ICO_OPTIMIZED, F_PN, F_TS_PROFILE, F_DEV_REV, /* Reg14 */ F_MAX_FIELDS }; /* initial field values, converted to register values */ struct bq25890_init_data { u8 ichg; /* charge current */ u8 vreg; /* regulation voltage */ u8 iterm; /* termination current */ u8 iprechg; /* precharge current */ u8 sysvmin; /* minimum system voltage limit */ u8 boostv; /* boost regulation voltage */ u8 boosti; /* boost current limit */ u8 boostf; /* boost frequency */ u8 ilim_en; /* enable ILIM pin */ u8 treg; /* thermal regulation threshold */ }; struct bq25890_state { u8 online; u8 chrg_status; u8 chrg_fault; u8 vsys_status; u8 boost_fault; u8 bat_fault; }; struct bq25890_device { struct i2c_client *client; struct device *dev; struct power_supply *charger; struct usb_phy *usb_phy; struct notifier_block usb_nb; struct work_struct usb_work; unsigned long usb_event; struct regmap *rmap; struct regmap_field *rmap_fields[F_MAX_FIELDS]; int chip_id; struct bq25890_init_data init_data; struct bq25890_state state; struct mutex lock; /* protect state data */ }; static const struct regmap_range bq25890_readonly_reg_ranges[] = { regmap_reg_range(0x0b, 0x0c), regmap_reg_range(0x0e, 0x13), }; static const struct regmap_access_table bq25890_writeable_regs = { .no_ranges = bq25890_readonly_reg_ranges, .n_no_ranges = ARRAY_SIZE(bq25890_readonly_reg_ranges), }; static const struct regmap_range bq25890_volatile_reg_ranges[] = { regmap_reg_range(0x00, 0x00), regmap_reg_range(0x09, 0x09), regmap_reg_range(0x0b, 0x0c), regmap_reg_range(0x0e, 0x14), }; static const struct regmap_access_table bq25890_volatile_regs = { .yes_ranges = bq25890_volatile_reg_ranges, .n_yes_ranges = ARRAY_SIZE(bq25890_volatile_reg_ranges), }; static const struct regmap_config bq25890_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0x14, .cache_type = REGCACHE_RBTREE, .wr_table = &bq25890_writeable_regs, .volatile_table = &bq25890_volatile_regs, }; static const struct reg_field bq25890_reg_fields[] = 
{ /* REG00 */ [F_EN_HIZ] = REG_FIELD(0x00, 7, 7), [F_EN_ILIM] = REG_FIELD(0x00, 6, 6), [F_IILIM] = REG_FIELD(0x00, 0, 5), /* REG01 */ [F_BHOT] = REG_FIELD(0x01, 6, 7), [F_BCOLD] = REG_FIELD(0x01, 5, 5), [F_VINDPM_OFS] = REG_FIELD(0x01, 0, 4), /* REG02 */ [F_CONV_START] = REG_FIELD(0x02, 7, 7), [F_CONV_RATE] = REG_FIELD(0x02, 6, 6), [F_BOOSTF] = REG_FIELD(0x02, 5, 5), [F_ICO_EN] = REG_FIELD(0x02, 4, 4), [F_HVDCP_EN] = REG_FIELD(0x02, 3, 3), [F_MAXC_EN] = REG_FIELD(0x02, 2, 2), [F_FORCE_DPM] = REG_FIELD(0x02, 1, 1), [F_AUTO_DPDM_EN] = REG_FIELD(0x02, 0, 0), /* REG03 */ [F_BAT_LOAD_EN] = REG_FIELD(0x03, 7, 7), [F_WD_RST] = REG_FIELD(0x03, 6, 6), [F_OTG_CFG] = REG_FIELD(0x03, 5, 5), [F_CHG_CFG] = REG_FIELD(0x03, 4, 4), [F_SYSVMIN] = REG_FIELD(0x03, 1, 3), /* REG04 */ [F_PUMPX_EN] = REG_FIELD(0x04, 7, 7), [F_ICHG] = REG_FIELD(0x04, 0, 6), /* REG05 */ [F_IPRECHG] = REG_FIELD(0x05, 4, 7), [F_ITERM] = REG_FIELD(0x05, 0, 3), /* REG06 */ [F_VREG] = REG_FIELD(0x06, 2, 7), [F_BATLOWV] = REG_FIELD(0x06, 1, 1), [F_VRECHG] = REG_FIELD(0x06, 0, 0), /* REG07 */ [F_TERM_EN] = REG_FIELD(0x07, 7, 7), [F_STAT_DIS] = REG_FIELD(0x07, 6, 6), [F_WD] = REG_FIELD(0x07, 4, 5), [F_TMR_EN] = REG_FIELD(0x07, 3, 3), [F_CHG_TMR] = REG_FIELD(0x07, 1, 2), [F_JEITA_ISET] = REG_FIELD(0x07, 0, 0), /* REG08 */ [F_BATCMP] = REG_FIELD(0x08, 6, 7), [F_VCLAMP] = REG_FIELD(0x08, 2, 4), [F_TREG] = REG_FIELD(0x08, 0, 1), /* REG09 */ [F_FORCE_ICO] = REG_FIELD(0x09, 7, 7), [F_TMR2X_EN] = REG_FIELD(0x09, 6, 6), [F_BATFET_DIS] = REG_FIELD(0x09, 5, 5), [F_JEITA_VSET] = REG_FIELD(0x09, 4, 4), [F_BATFET_DLY] = REG_FIELD(0x09, 3, 3), [F_BATFET_RST_EN] = REG_FIELD(0x09, 2, 2), [F_PUMPX_UP] = REG_FIELD(0x09, 1, 1), [F_PUMPX_DN] = REG_FIELD(0x09, 0, 0), /* REG0A */ [F_BOOSTV] = REG_FIELD(0x0A, 4, 7), [F_BOOSTI] = REG_FIELD(0x0A, 0, 2), /* REG0B */ [F_VBUS_STAT] = REG_FIELD(0x0B, 5, 7), [F_CHG_STAT] = REG_FIELD(0x0B, 3, 4), [F_PG_STAT] = REG_FIELD(0x0B, 2, 2), [F_SDP_STAT] = REG_FIELD(0x0B, 1, 1), [F_VSYS_STAT] = 
REG_FIELD(0x0B, 0, 0), /* REG0C */ [F_WD_FAULT] = REG_FIELD(0x0C, 7, 7), [F_BOOST_FAULT] = REG_FIELD(0x0C, 6, 6), [F_CHG_FAULT] = REG_FIELD(0x0C, 4, 5), [F_BAT_FAULT] = REG_FIELD(0x0C, 3, 3), [F_NTC_FAULT] = REG_FIELD(0x0C, 0, 2), /* REG0D */ [F_FORCE_VINDPM] = REG_FIELD(0x0D, 7, 7), [F_VINDPM] = REG_FIELD(0x0D, 0, 6), /* REG0E */ [F_THERM_STAT] = REG_FIELD(0x0E, 7, 7), [F_BATV] = REG_FIELD(0x0E, 0, 6), /* REG0F */ [F_SYSV] = REG_FIELD(0x0F, 0, 6), /* REG10 */ [F_TSPCT] = REG_FIELD(0x10, 0, 6), /* REG11 */ [F_VBUS_GD] = REG_FIELD(0x11, 7, 7), [F_VBUSV] = REG_FIELD(0x11, 0, 6), /* REG12 */ [F_ICHGR] = REG_FIELD(0x12, 0, 6), /* REG13 */ [F_VDPM_STAT] = REG_FIELD(0x13, 7, 7), [F_IDPM_STAT] = REG_FIELD(0x13, 6, 6), [F_IDPM_LIM] = REG_FIELD(0x13, 0, 5), /* REG14 */ [F_REG_RST] = REG_FIELD(0x14, 7, 7), [F_ICO_OPTIMIZED] = REG_FIELD(0x14, 6, 6), [F_PN] = REG_FIELD(0x14, 3, 5), [F_TS_PROFILE] = REG_FIELD(0x14, 2, 2), [F_DEV_REV] = REG_FIELD(0x14, 0, 1) }; /* * Most of the val -> idx conversions can be computed, given the minimum, * maximum and the step between values. For the rest of conversions, we use * lookup tables. 
*/ enum bq25890_table_ids { /* range tables */ TBL_ICHG, TBL_ITERM, TBL_IPRECHG, TBL_VREG, TBL_BATCMP, TBL_VCLAMP, TBL_BOOSTV, TBL_SYSVMIN, /* lookup tables */ TBL_TREG, TBL_BOOSTI, }; /* Thermal Regulation Threshold lookup table, in degrees Celsius */ static const u32 bq25890_treg_tbl[] = { 60, 80, 100, 120 }; #define BQ25890_TREG_TBL_SIZE ARRAY_SIZE(bq25890_treg_tbl) /* Boost mode current limit lookup table, in uA */ static const u32 bq25890_boosti_tbl[] = { 500000, 700000, 1100000, 1300000, 1600000, 1800000, 2100000, 2400000 }; #define BQ25890_BOOSTI_TBL_SIZE ARRAY_SIZE(bq25890_boosti_tbl) struct bq25890_range { u32 min; u32 max; u32 step; }; struct bq25890_lookup { const u32 *tbl; u32 size; }; static const union { struct bq25890_range rt; struct bq25890_lookup lt; } bq25890_tables[] = { /* range tables */ [TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */ [TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */ [TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */ [TBL_BATCMP] = { .rt = {0, 140, 20} }, /* mOhm */ [TBL_VCLAMP] = { .rt = {0, 224000, 32000} }, /* uV */ [TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */ [TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */ /* lookup tables */ [TBL_TREG] = { .lt = {bq25890_treg_tbl, BQ25890_TREG_TBL_SIZE} }, [TBL_BOOSTI] = { .lt = {bq25890_boosti_tbl, BQ25890_BOOSTI_TBL_SIZE} } }; static int bq25890_field_read(struct bq25890_device *bq, enum bq25890_fields field_id) { int ret; int val; ret = regmap_field_read(bq->rmap_fields[field_id], &val); if (ret < 0) return ret; return val; } static int bq25890_field_write(struct bq25890_device *bq, enum bq25890_fields field_id, u8 val) { return regmap_field_write(bq->rmap_fields[field_id], val); } static u8 bq25890_find_idx(u32 value, enum bq25890_table_ids id) { u8 idx; if (id >= TBL_TREG) { const u32 *tbl = bq25890_tables[id].lt.tbl; u32 tbl_size = bq25890_tables[id].lt.size; for (idx = 1; idx < tbl_size && tbl[idx] <= value; idx++) ; } else { 
const struct bq25890_range *rtbl = &bq25890_tables[id].rt; u8 rtbl_size; rtbl_size = (rtbl->max - rtbl->min) / rtbl->step + 1; for (idx = 1; idx < rtbl_size && (idx * rtbl->step + rtbl->min <= value); idx++) ; } return idx - 1; } static u32 bq25890_find_val(u8 idx, enum bq25890_table_ids id) { const struct bq25890_range *rtbl; /* lookup table? */ if (id >= TBL_TREG) return bq25890_tables[id].lt.tbl[idx]; /* range table */ rtbl = &bq25890_tables[id].rt; return (rtbl->min + idx * rtbl->step); } enum bq25890_status { STATUS_NOT_CHARGING, STATUS_PRE_CHARGING, STATUS_FAST_CHARGING, STATUS_TERMINATION_DONE, }; enum bq25890_chrg_fault { CHRG_FAULT_NORMAL, CHRG_FAULT_INPUT, CHRG_FAULT_THERMAL_SHUTDOWN, CHRG_FAULT_TIMER_EXPIRED, }; static int bq25890_power_supply_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { int ret; struct bq25890_device *bq = power_supply_get_drvdata(psy); struct bq25890_state state; mutex_lock(&bq->lock); state = bq->state; mutex_unlock(&bq->lock); switch (psp) { case POWER_SUPPLY_PROP_STATUS: if (!state.online) val->intval = POWER_SUPPLY_STATUS_DISCHARGING; else if (state.chrg_status == STATUS_NOT_CHARGING) val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; else if (state.chrg_status == STATUS_PRE_CHARGING || state.chrg_status == STATUS_FAST_CHARGING) val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (state.chrg_status == STATUS_TERMINATION_DONE) val->intval = POWER_SUPPLY_STATUS_FULL; else val->intval = POWER_SUPPLY_STATUS_UNKNOWN; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = BQ25890_MANUFACTURER; break; case POWER_SUPPLY_PROP_ONLINE: val->intval = state.online; break; case POWER_SUPPLY_PROP_HEALTH: if (!state.chrg_fault && !state.bat_fault && !state.boost_fault) val->intval = POWER_SUPPLY_HEALTH_GOOD; else if (state.bat_fault) val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE; else if (state.chrg_fault == CHRG_FAULT_TIMER_EXPIRED) val->intval = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE; 
else if (state.chrg_fault == CHRG_FAULT_THERMAL_SHUTDOWN) val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; else val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: ret = bq25890_field_read(bq, F_ICHGR); /* read measured value */ if (ret < 0) return ret; /* converted_val = ADC_val * 50mA (table 10.3.19) */ val->intval = ret * 50000; break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: val->intval = bq25890_tables[TBL_ICHG].rt.max; break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: if (!state.online) { val->intval = 0; break; } ret = bq25890_field_read(bq, F_BATV); /* read measured value */ if (ret < 0) return ret; /* converted_val = 2.304V + ADC_val * 20mV (table 10.3.15) */ val->intval = 2304000 + ret * 20000; break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: val->intval = bq25890_tables[TBL_VREG].rt.max; break; case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: val->intval = bq25890_find_val(bq->init_data.iterm, TBL_ITERM); break; default: return -EINVAL; } return 0; } static int bq25890_get_chip_state(struct bq25890_device *bq, struct bq25890_state *state) { int i, ret; struct { enum bq25890_fields id; u8 *data; } state_fields[] = { {F_CHG_STAT, &state->chrg_status}, {F_PG_STAT, &state->online}, {F_VSYS_STAT, &state->vsys_status}, {F_BOOST_FAULT, &state->boost_fault}, {F_BAT_FAULT, &state->bat_fault}, {F_CHG_FAULT, &state->chrg_fault} }; for (i = 0; i < ARRAY_SIZE(state_fields); i++) { ret = bq25890_field_read(bq, state_fields[i].id); if (ret < 0) return ret; *state_fields[i].data = ret; } dev_dbg(bq->dev, "S:CHG/PG/VSYS=%d/%d/%d, F:CHG/BOOST/BAT=%d/%d/%d\n", state->chrg_status, state->online, state->vsys_status, state->chrg_fault, state->boost_fault, state->bat_fault); return 0; } static bool bq25890_state_changed(struct bq25890_device *bq, struct bq25890_state *new_state) { struct bq25890_state old_state; mutex_lock(&bq->lock); old_state = bq->state; mutex_unlock(&bq->lock); return 
(old_state.chrg_status != new_state->chrg_status || old_state.chrg_fault != new_state->chrg_fault || old_state.online != new_state->online || old_state.bat_fault != new_state->bat_fault || old_state.boost_fault != new_state->boost_fault || old_state.vsys_status != new_state->vsys_status); } static void bq25890_handle_state_change(struct bq25890_device *bq, struct bq25890_state *new_state) { int ret; struct bq25890_state old_state; mutex_lock(&bq->lock); old_state = bq->state; mutex_unlock(&bq->lock); if (!new_state->online) { /* power removed */ /* disable ADC */ ret = bq25890_field_write(bq, F_CONV_START, 0); if (ret < 0) goto error; } else if (!old_state.online) { /* power inserted */ /* enable ADC, to have control of charge current/voltage */ ret = bq25890_field_write(bq, F_CONV_START, 1); if (ret < 0) goto error; } return; error: dev_err(bq->dev, "Error communicating with the chip.\n"); } static irqreturn_t bq25890_irq_handler_thread(int irq, void *private) { struct bq25890_device *bq = private; int ret; struct bq25890_state state; ret = bq25890_get_chip_state(bq, &state); if (ret < 0) goto handled; if (!bq25890_state_changed(bq, &state)) goto handled; bq25890_handle_state_change(bq, &state); mutex_lock(&bq->lock); bq->state = state; mutex_unlock(&bq->lock); power_supply_changed(bq->charger); handled: return IRQ_HANDLED; } static int bq25890_chip_reset(struct bq25890_device *bq) { int ret; int rst_check_counter = 10; ret = bq25890_field_write(bq, F_REG_RST, 1); if (ret < 0) return ret; do { ret = bq25890_field_read(bq, F_REG_RST); if (ret < 0) return ret; usleep_range(5, 10); } while (ret == 1 && --rst_check_counter); if (!rst_check_counter) return -ETIMEDOUT; return 0; } static int bq25890_hw_init(struct bq25890_device *bq) { int ret; int i; struct bq25890_state state; const struct { enum bq25890_fields id; u32 value; } init_data[] = { {F_ICHG, bq->init_data.ichg}, {F_VREG, bq->init_data.vreg}, {F_ITERM, bq->init_data.iterm}, {F_IPRECHG, 
bq->init_data.iprechg}, {F_SYSVMIN, bq->init_data.sysvmin}, {F_BOOSTV, bq->init_data.boostv}, {F_BOOSTI, bq->init_data.boosti}, {F_BOOSTF, bq->init_data.boostf}, {F_EN_ILIM, bq->init_data.ilim_en}, {F_TREG, bq->init_data.treg} }; ret = bq25890_chip_reset(bq); if (ret < 0) return ret; /* disable watchdog */ ret = bq25890_field_write(bq, F_WD, 0); if (ret < 0) return ret; /* initialize currents/voltages and other parameters */ for (i = 0; i < ARRAY_SIZE(init_data); i++) { ret = bq25890_field_write(bq, init_data[i].id, init_data[i].value); if (ret < 0) return ret; } /* Configure ADC for continuous conversions. This does not enable it. */ ret = bq25890_field_write(bq, F_CONV_RATE, 1); if (ret < 0) return ret; ret = bq25890_get_chip_state(bq, &state); if (ret < 0) return ret; mutex_lock(&bq->lock); bq->state = state; mutex_unlock(&bq->lock); return 0; } static enum power_supply_property bq25890_power_supply_props[] = { POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, }; static char *bq25890_charger_supplied_to[] = { "main-battery", }; static const struct power_supply_desc bq25890_power_supply_desc = { .name = "bq25890-charger", .type = POWER_SUPPLY_TYPE_USB, .properties = bq25890_power_supply_props, .num_properties = ARRAY_SIZE(bq25890_power_supply_props), .get_property = bq25890_power_supply_get_property, }; static int bq25890_power_supply_init(struct bq25890_device *bq) { struct power_supply_config psy_cfg = { .drv_data = bq, }; psy_cfg.supplied_to = bq25890_charger_supplied_to; psy_cfg.num_supplicants = ARRAY_SIZE(bq25890_charger_supplied_to); bq->charger = power_supply_register(bq->dev, &bq25890_power_supply_desc, &psy_cfg); return PTR_ERR_OR_ZERO(bq->charger); } static void 
bq25890_usb_work(struct work_struct *data) { int ret; struct bq25890_device *bq = container_of(data, struct bq25890_device, usb_work); switch (bq->usb_event) { case USB_EVENT_ID: /* Enable boost mode */ ret = bq25890_field_write(bq, F_OTG_CFG, 1); if (ret < 0) goto error; break; case USB_EVENT_NONE: /* Disable boost mode */ ret = bq25890_field_write(bq, F_OTG_CFG, 0); if (ret < 0) goto error; power_supply_changed(bq->charger); break; } return; error: dev_err(bq->dev, "Error switching to boost/charger mode.\n"); } static int bq25890_usb_notifier(struct notifier_block *nb, unsigned long val, void *priv) { struct bq25890_device *bq = container_of(nb, struct bq25890_device, usb_nb); bq->usb_event = val; queue_work(system_power_efficient_wq, &bq->usb_work); return NOTIFY_OK; } static int bq25890_irq_probe(struct bq25890_device *bq) { struct gpio_desc *irq; irq = devm_gpiod_get_index(bq->dev, BQ25890_IRQ_PIN, 0, GPIOD_IN); if (IS_ERR(irq)) { dev_err(bq->dev, "Could not probe irq pin.\n"); return PTR_ERR(irq); } return gpiod_to_irq(irq); } static int bq25890_fw_read_u32_props(struct bq25890_device *bq) { int ret; u32 property; int i; struct bq25890_init_data *init = &bq->init_data; struct { char *name; bool optional; enum bq25890_table_ids tbl_id; u8 *conv_data; /* holds converted value from given property */ } props[] = { /* required properties */ {"ti,charge-current", false, TBL_ICHG, &init->ichg}, {"ti,battery-regulation-voltage", false, TBL_VREG, &init->vreg}, {"ti,termination-current", false, TBL_ITERM, &init->iterm}, {"ti,precharge-current", false, TBL_ITERM, &init->iprechg}, {"ti,minimum-sys-voltage", false, TBL_SYSVMIN, &init->sysvmin}, {"ti,boost-voltage", false, TBL_BOOSTV, &init->boostv}, {"ti,boost-max-current", false, TBL_BOOSTI, &init->boosti}, /* optional properties */ {"ti,thermal-regulation-threshold", true, TBL_TREG, &init->treg} }; /* initialize data for optional properties */ init->treg = 3; /* 120 degrees Celsius */ for (i = 0; i < ARRAY_SIZE(props); 
i++) { ret = device_property_read_u32(bq->dev, props[i].name, &property); if (ret < 0) { if (props[i].optional) continue; return ret; } *props[i].conv_data = bq25890_find_idx(property, props[i].tbl_id); } return 0; } static int bq25890_fw_probe(struct bq25890_device *bq) { int ret; struct bq25890_init_data *init = &bq->init_data; ret = bq25890_fw_read_u32_props(bq); if (ret < 0) return ret; init->ilim_en = device_property_read_bool(bq->dev, "ti,use-ilim-pin"); init->boostf = device_property_read_bool(bq->dev, "ti,boost-low-freq"); return 0; } static int bq25890_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct device *dev = &client->dev; struct bq25890_device *bq; int ret; int i; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(dev, "No support for SMBUS_BYTE_DATA\n"); return -ENODEV; } bq = devm_kzalloc(dev, sizeof(*bq), GFP_KERNEL); if (!bq) return -ENOMEM; bq->client = client; bq->dev = dev; mutex_init(&bq->lock); bq->rmap = devm_regmap_init_i2c(client, &bq25890_regmap_config); if (IS_ERR(bq->rmap)) { dev_err(dev, "failed to allocate register map\n"); return PTR_ERR(bq->rmap); } for (i = 0; i < ARRAY_SIZE(bq25890_reg_fields); i++) { const struct reg_field *reg_fields = bq25890_reg_fields; bq->rmap_fields[i] = devm_regmap_field_alloc(dev, bq->rmap, reg_fields[i]); if (IS_ERR(bq->rmap_fields[i])) { dev_err(dev, "cannot allocate regmap field\n"); return PTR_ERR(bq->rmap_fields[i]); } } i2c_set_clientdata(client, bq); bq->chip_id = bq25890_field_read(bq, F_PN); if (bq->chip_id < 0) { dev_err(dev, "Cannot read chip ID.\n"); return bq->chip_id; } if (bq->chip_id != BQ25890_ID) { dev_err(dev, "Chip with ID=%d, not supported!\n", bq->chip_id); return -ENODEV; } if (!dev->platform_data) { ret = bq25890_fw_probe(bq); if (ret < 0) { dev_err(dev, "Cannot read device properties.\n"); return ret; } } else { return -ENODEV; } ret = bq25890_hw_init(bq); if (ret < 
0) { dev_err(dev, "Cannot initialize the chip.\n"); return ret; } if (client->irq <= 0) client->irq = bq25890_irq_probe(bq); if (client->irq < 0) { dev_err(dev, "No irq resource found.\n"); return client->irq; } /* OTG reporting */ bq->usb_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); if (!IS_ERR_OR_NULL(bq->usb_phy)) { INIT_WORK(&bq->usb_work, bq25890_usb_work); bq->usb_nb.notifier_call = bq25890_usb_notifier; usb_register_notifier(bq->usb_phy, &bq->usb_nb); } ret = devm_request_threaded_irq(dev, client->irq, NULL, bq25890_irq_handler_thread, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, BQ25890_IRQ_PIN, bq); if (ret) goto irq_fail; ret = bq25890_power_supply_init(bq); if (ret < 0) { dev_err(dev, "Failed to register power supply\n"); goto irq_fail; } return 0; irq_fail: if (!IS_ERR_OR_NULL(bq->usb_phy)) usb_unregister_notifier(bq->usb_phy, &bq->usb_nb); return ret; } static int bq25890_remove(struct i2c_client *client) { struct bq25890_device *bq = i2c_get_clientdata(client); power_supply_unregister(bq->charger); if (!IS_ERR_OR_NULL(bq->usb_phy)) usb_unregister_notifier(bq->usb_phy, &bq->usb_nb); /* reset all registers to default values */ bq25890_chip_reset(bq); return 0; } #ifdef CONFIG_PM_SLEEP static int bq25890_suspend(struct device *dev) { struct bq25890_device *bq = dev_get_drvdata(dev); /* * If charger is removed, while in suspend, make sure ADC is diabled * since it consumes slightly more power. */ return bq25890_field_write(bq, F_CONV_START, 0); } static int bq25890_resume(struct device *dev) { int ret; struct bq25890_state state; struct bq25890_device *bq = dev_get_drvdata(dev); ret = bq25890_get_chip_state(bq, &state); if (ret < 0) return ret; mutex_lock(&bq->lock); bq->state = state; mutex_unlock(&bq->lock); /* Re-enable ADC only if charger is plugged in. 
*/ if (state.online) { ret = bq25890_field_write(bq, F_CONV_START, 1); if (ret < 0) return ret; } /* signal userspace, maybe state changed while suspended */ power_supply_changed(bq->charger); return 0; } #endif static const struct dev_pm_ops bq25890_pm = { SET_SYSTEM_SLEEP_PM_OPS(bq25890_suspend, bq25890_resume) }; static const struct i2c_device_id bq25890_i2c_ids[] = { { "bq25890", 0 }, {}, }; MODULE_DEVICE_TABLE(i2c, bq25890_i2c_ids); static const struct of_device_id bq25890_of_match[] = { { .compatible = "ti,bq25890", }, { }, }; MODULE_DEVICE_TABLE(of, bq25890_of_match); static const struct acpi_device_id bq25890_acpi_match[] = { {"BQ258900", 0}, {}, }; MODULE_DEVICE_TABLE(acpi, bq25890_acpi_match); static struct i2c_driver bq25890_driver = { .driver = { .name = "bq25890-charger", .of_match_table = of_match_ptr(bq25890_of_match), .acpi_match_table = ACPI_PTR(bq25890_acpi_match), .pm = &bq25890_pm, }, .probe = bq25890_probe, .remove = bq25890_remove, .id_table = bq25890_i2c_ids, }; module_i2c_driver(bq25890_driver); MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>"); MODULE_DESCRIPTION("bq25890 charger driver"); MODULE_LICENSE("GPL");
gpl-2.0
timmytim/honeybutter_kernel
arch/blackfin/mm/sram-alloc.c
859
20418
/* * SRAM allocator for Blackfin on-chip memory * * Copyright 2004-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/rtc.h> #include <linux/slab.h> #include <asm/blackfin.h> #include <asm/mem_map.h> #include "blackfin_sram.h" /* the data structure for L1 scratchpad and DATA SRAM */ struct sram_piece { void *paddr; int size; pid_t pid; struct sram_piece *next; }; static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock); static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head); static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head); #if L1_DATA_A_LENGTH != 0 static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head); static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head); #endif #if L1_DATA_B_LENGTH != 0 static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head); static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head); #endif #if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock); #endif #if L1_CODE_LENGTH != 0 static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock); static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head); static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head); #endif #if L2_LENGTH != 0 static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp; static struct sram_piece free_l2_sram_head, used_l2_sram_head; #endif static struct kmem_cache *sram_piece_cache; /* L1 Scratchpad SRAM initialization function */ static void __init l1sram_init(void) { unsigned int cpu; unsigned long reserve; #ifdef CONFIG_SMP reserve = 0; #else reserve = sizeof(struct l1_scratch_task_info); #endif for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { 
per_cpu(free_l1_ssram_head, cpu).next = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); if (!per_cpu(free_l1_ssram_head, cpu).next) { printk(KERN_INFO "Fail to initialize Scratchpad data SRAM.\n"); return; } per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve; per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve; per_cpu(free_l1_ssram_head, cpu).next->pid = 0; per_cpu(free_l1_ssram_head, cpu).next->next = NULL; per_cpu(used_l1_ssram_head, cpu).next = NULL; /* mutex initialize */ spin_lock_init(&per_cpu(l1sram_lock, cpu)); printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n", L1_SCRATCH_LENGTH >> 10); } } static void __init l1_data_sram_init(void) { #if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0 unsigned int cpu; #endif #if L1_DATA_A_LENGTH != 0 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { per_cpu(free_l1_data_A_sram_head, cpu).next = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); if (!per_cpu(free_l1_data_A_sram_head, cpu).next) { printk(KERN_INFO "Fail to initialize L1 Data A SRAM.\n"); return; } per_cpu(free_l1_data_A_sram_head, cpu).next->paddr = (void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1); per_cpu(free_l1_data_A_sram_head, cpu).next->size = L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1); per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0; per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL; per_cpu(used_l1_data_A_sram_head, cpu).next = NULL; printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n", L1_DATA_A_LENGTH >> 10, per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10); } #endif #if L1_DATA_B_LENGTH != 0 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { per_cpu(free_l1_data_B_sram_head, cpu).next = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); if (!per_cpu(free_l1_data_B_sram_head, cpu).next) { printk(KERN_INFO "Fail to initialize L1 Data B SRAM.\n"); return; } per_cpu(free_l1_data_B_sram_head, cpu).next->paddr = (void 
*)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1); per_cpu(free_l1_data_B_sram_head, cpu).next->size = L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1); per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0; per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL; per_cpu(used_l1_data_B_sram_head, cpu).next = NULL; printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n", L1_DATA_B_LENGTH >> 10, per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10); /* mutex initialize */ } #endif #if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) spin_lock_init(&per_cpu(l1_data_sram_lock, cpu)); #endif } static void __init l1_inst_sram_init(void) { #if L1_CODE_LENGTH != 0 unsigned int cpu; for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { per_cpu(free_l1_inst_sram_head, cpu).next = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); if (!per_cpu(free_l1_inst_sram_head, cpu).next) { printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n"); return; } per_cpu(free_l1_inst_sram_head, cpu).next->paddr = (void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1); per_cpu(free_l1_inst_sram_head, cpu).next->size = L1_CODE_LENGTH - (_etext_l1 - _stext_l1); per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0; per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL; per_cpu(used_l1_inst_sram_head, cpu).next = NULL; printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n", L1_CODE_LENGTH >> 10, per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10); /* mutex initialize */ spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu)); } #endif } static void __init l2_sram_init(void) { #if L2_LENGTH != 0 free_l2_sram_head.next = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); if (!free_l2_sram_head.next) { printk(KERN_INFO "Fail to initialize L2 SRAM.\n"); return; } free_l2_sram_head.next->paddr = (void *)L2_START + (_ebss_l2 - _stext_l2); free_l2_sram_head.next->size = L2_LENGTH - (_ebss_l2 - _stext_l2); 
free_l2_sram_head.next->pid = 0; free_l2_sram_head.next->next = NULL; used_l2_sram_head.next = NULL; printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n", L2_LENGTH >> 10, free_l2_sram_head.next->size >> 10); /* mutex initialize */ spin_lock_init(&l2_sram_lock); #endif } static int __init bfin_sram_init(void) { sram_piece_cache = kmem_cache_create("sram_piece_cache", sizeof(struct sram_piece), 0, SLAB_PANIC, NULL); l1sram_init(); l1_data_sram_init(); l1_inst_sram_init(); l2_sram_init(); return 0; } pure_initcall(bfin_sram_init); /* SRAM allocate function */ static void *_sram_alloc(size_t size, struct sram_piece *pfree_head, struct sram_piece *pused_head) { struct sram_piece *pslot, *plast, *pavail; if (size <= 0 || !pfree_head || !pused_head) return NULL; /* Align the size */ size = (size + 3) & ~3; pslot = pfree_head->next; plast = pfree_head; /* search an available piece slot */ while (pslot != NULL && size > pslot->size) { plast = pslot; pslot = pslot->next; } if (!pslot) return NULL; if (pslot->size == size) { plast->next = pslot->next; pavail = pslot; } else { /* use atomic so our L1 allocator can be used atomically */ pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC); if (!pavail) return NULL; pavail->paddr = pslot->paddr; pavail->size = size; pslot->paddr += size; pslot->size -= size; } pavail->pid = current->pid; pslot = pused_head->next; plast = pused_head; /* insert new piece into used piece list !!! */ while (pslot != NULL && pavail->paddr < pslot->paddr) { plast = pslot; pslot = pslot->next; } pavail->next = pslot; plast->next = pavail; return pavail->paddr; } /* Allocate the largest available block. 
*/ static void *_sram_alloc_max(struct sram_piece *pfree_head, struct sram_piece *pused_head, unsigned long *psize) { struct sram_piece *pslot, *pmax; if (!pfree_head || !pused_head) return NULL; pmax = pslot = pfree_head->next; /* search an available piece slot */ while (pslot != NULL) { if (pslot->size > pmax->size) pmax = pslot; pslot = pslot->next; } if (!pmax) return NULL; *psize = pmax->size; return _sram_alloc(*psize, pfree_head, pused_head); } /* SRAM free function */ static int _sram_free(const void *addr, struct sram_piece *pfree_head, struct sram_piece *pused_head) { struct sram_piece *pslot, *plast, *pavail; if (!pfree_head || !pused_head) return -1; /* search the relevant memory slot */ pslot = pused_head->next; plast = pused_head; /* search an available piece slot */ while (pslot != NULL && pslot->paddr != addr) { plast = pslot; pslot = pslot->next; } if (!pslot) return -1; plast->next = pslot->next; pavail = pslot; pavail->pid = 0; /* insert free pieces back to the free list */ pslot = pfree_head->next; plast = pfree_head; while (pslot != NULL && addr > pslot->paddr) { plast = pslot; pslot = pslot->next; } if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) { plast->size += pavail->size; kmem_cache_free(sram_piece_cache, pavail); } else { pavail->next = plast->next; plast->next = pavail; plast = pavail; } if (pslot && plast->paddr + plast->size == pslot->paddr) { plast->size += pslot->size; plast->next = pslot->next; kmem_cache_free(sram_piece_cache, pslot); } return 0; } int sram_free(const void *addr) { #if L1_CODE_LENGTH != 0 if (addr >= (void *)get_l1_code_start() && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH)) return l1_inst_sram_free(addr); else #endif #if L1_DATA_A_LENGTH != 0 if (addr >= (void *)get_l1_data_a_start() && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH)) return l1_data_A_sram_free(addr); else #endif #if L1_DATA_B_LENGTH != 0 if (addr >= (void *)get_l1_data_b_start() && addr < (void 
*)(get_l1_data_b_start() + L1_DATA_B_LENGTH)) return l1_data_B_sram_free(addr); else #endif #if L2_LENGTH != 0 if (addr >= (void *)L2_START && addr < (void *)(L2_START + L2_LENGTH)) return l2_sram_free(addr); else #endif return -1; } EXPORT_SYMBOL(sram_free); void *l1_data_A_sram_alloc(size_t size) { #if L1_DATA_A_LENGTH != 0 unsigned long flags; void *addr; unsigned int cpu; cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu), &per_cpu(used_l1_data_A_sram_head, cpu)); /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n", (long unsigned int)addr, size); return addr; #else return NULL; #endif } EXPORT_SYMBOL(l1_data_A_sram_alloc); int l1_data_A_sram_free(const void *addr) { #if L1_DATA_A_LENGTH != 0 unsigned long flags; int ret; unsigned int cpu; cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu), &per_cpu(used_l1_data_A_sram_head, cpu)); /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); return ret; #else return -1; #endif } EXPORT_SYMBOL(l1_data_A_sram_free); void *l1_data_B_sram_alloc(size_t size) { #if L1_DATA_B_LENGTH != 0 unsigned long flags; void *addr; unsigned int cpu; cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu), &per_cpu(used_l1_data_B_sram_head, cpu)); /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n", (long unsigned int)addr, size); return addr; #else return NULL; #endif } EXPORT_SYMBOL(l1_data_B_sram_alloc); int 
l1_data_B_sram_free(const void *addr) { #if L1_DATA_B_LENGTH != 0 unsigned long flags; int ret; unsigned int cpu; cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu), &per_cpu(used_l1_data_B_sram_head, cpu)); /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); return ret; #else return -1; #endif } EXPORT_SYMBOL(l1_data_B_sram_free); void *l1_data_sram_alloc(size_t size) { void *addr = l1_data_A_sram_alloc(size); if (!addr) addr = l1_data_B_sram_alloc(size); return addr; } EXPORT_SYMBOL(l1_data_sram_alloc); void *l1_data_sram_zalloc(size_t size) { void *addr = l1_data_sram_alloc(size); if (addr) memset(addr, 0x00, size); return addr; } EXPORT_SYMBOL(l1_data_sram_zalloc); int l1_data_sram_free(const void *addr) { int ret; ret = l1_data_A_sram_free(addr); if (ret == -1) ret = l1_data_B_sram_free(addr); return ret; } EXPORT_SYMBOL(l1_data_sram_free); void *l1_inst_sram_alloc(size_t size) { #if L1_CODE_LENGTH != 0 unsigned long flags; void *addr; unsigned int cpu; cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu), &per_cpu(used_l1_inst_sram_head, cpu)); /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n", (long unsigned int)addr, size); return addr; #else return NULL; #endif } EXPORT_SYMBOL(l1_inst_sram_alloc); int l1_inst_sram_free(const void *addr) { #if L1_CODE_LENGTH != 0 unsigned long flags; int ret; unsigned int cpu; cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu), &per_cpu(used_l1_inst_sram_head, cpu)); /* add mutex operation */ 
spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); return ret; #else return -1; #endif } EXPORT_SYMBOL(l1_inst_sram_free); /* L1 Scratchpad memory allocate function */ void *l1sram_alloc(size_t size) { unsigned long flags; void *addr; unsigned int cpu; cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)); /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); return addr; } /* L1 Scratchpad memory allocate function */ void *l1sram_alloc_max(size_t *psize) { unsigned long flags; void *addr; unsigned int cpu; cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu), psize); /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); return addr; } /* L1 Scratchpad memory free function */ int l1sram_free(const void *addr) { unsigned long flags; int ret; unsigned int cpu; cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)); /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); return ret; } void *l2_sram_alloc(size_t size) { #if L2_LENGTH != 0 unsigned long flags; void *addr; /* add mutex operation */ spin_lock_irqsave(&l2_sram_lock, flags); addr = _sram_alloc(size, &free_l2_sram_head, &used_l2_sram_head); /* add mutex operation */ spin_unlock_irqrestore(&l2_sram_lock, flags); pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n", (long unsigned int)addr, size); return addr; #else return NULL; #endif } EXPORT_SYMBOL(l2_sram_alloc); void *l2_sram_zalloc(size_t size) { void *addr = l2_sram_alloc(size); if (addr) memset(addr, 0x00, 
size); return addr; } EXPORT_SYMBOL(l2_sram_zalloc); int l2_sram_free(const void *addr) { #if L2_LENGTH != 0 unsigned long flags; int ret; /* add mutex operation */ spin_lock_irqsave(&l2_sram_lock, flags); ret = _sram_free(addr, &free_l2_sram_head, &used_l2_sram_head); /* add mutex operation */ spin_unlock_irqrestore(&l2_sram_lock, flags); return ret; #else return -1; #endif } EXPORT_SYMBOL(l2_sram_free); int sram_free_with_lsl(const void *addr) { struct sram_list_struct *lsl, **tmp; struct mm_struct *mm = current->mm; for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next) if ((*tmp)->addr == addr) goto found; return -1; found: lsl = *tmp; sram_free(addr); *tmp = lsl->next; kfree(lsl); return 0; } EXPORT_SYMBOL(sram_free_with_lsl); /* Allocate memory and keep in L1 SRAM List (lsl) so that the resources are * tracked. These are designed for userspace so that when a process exits, * we can safely reap their resources. */ void *sram_alloc_with_lsl(size_t size, unsigned long flags) { void *addr = NULL; struct sram_list_struct *lsl = NULL; struct mm_struct *mm = current->mm; lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL); if (!lsl) return NULL; if (flags & L1_INST_SRAM) addr = l1_inst_sram_alloc(size); if (addr == NULL && (flags & L1_DATA_A_SRAM)) addr = l1_data_A_sram_alloc(size); if (addr == NULL && (flags & L1_DATA_B_SRAM)) addr = l1_data_B_sram_alloc(size); if (addr == NULL && (flags & L2_SRAM)) addr = l2_sram_alloc(size); if (addr == NULL) { kfree(lsl); return NULL; } lsl->addr = addr; lsl->length = size; lsl->next = mm->context.sram_list; mm->context.sram_list = lsl; return addr; } EXPORT_SYMBOL(sram_alloc_with_lsl); #ifdef CONFIG_PROC_FS /* Once we get a real allocator, we'll throw all of this away. * Until then, we need some sort of visibility into the L1 alloc. */ /* Need to keep line of output the same. Currently, that is 44 bytes * (including newline). 
*/ static int _sram_proc_read(char *buf, int *len, int count, const char *desc, struct sram_piece *pfree_head, struct sram_piece *pused_head) { struct sram_piece *pslot; if (!pfree_head || !pused_head) return -1; *len += sprintf(&buf[*len], "--- SRAM %-14s Size PID State \n", desc); /* search the relevant memory slot */ pslot = pused_head->next; while (pslot != NULL) { *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n", pslot->paddr, pslot->paddr + pslot->size, pslot->size, pslot->pid, "ALLOCATED"); pslot = pslot->next; } pslot = pfree_head->next; while (pslot != NULL) { *len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n", pslot->paddr, pslot->paddr + pslot->size, pslot->size, pslot->pid, "FREE"); pslot = pslot->next; } return 0; } static int sram_proc_read(char *buf, char **start, off_t offset, int count, int *eof, void *data) { int len = 0; unsigned int cpu; for (cpu = 0; cpu < num_possible_cpus(); ++cpu) { if (_sram_proc_read(buf, &len, count, "Scratchpad", &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu))) goto not_done; #if L1_DATA_A_LENGTH != 0 if (_sram_proc_read(buf, &len, count, "L1 Data A", &per_cpu(free_l1_data_A_sram_head, cpu), &per_cpu(used_l1_data_A_sram_head, cpu))) goto not_done; #endif #if L1_DATA_B_LENGTH != 0 if (_sram_proc_read(buf, &len, count, "L1 Data B", &per_cpu(free_l1_data_B_sram_head, cpu), &per_cpu(used_l1_data_B_sram_head, cpu))) goto not_done; #endif #if L1_CODE_LENGTH != 0 if (_sram_proc_read(buf, &len, count, "L1 Instruction", &per_cpu(free_l1_inst_sram_head, cpu), &per_cpu(used_l1_inst_sram_head, cpu))) goto not_done; #endif } #if L2_LENGTH != 0 if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head, &used_l2_sram_head)) goto not_done; #endif *eof = 1; not_done: return len; } static int __init sram_proc_init(void) { struct proc_dir_entry *ptr; ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL); if (!ptr) { printk(KERN_WARNING "unable to create /proc/sram\n"); return -1; } ptr->read_proc = 
sram_proc_read; return 0; } late_initcall(sram_proc_init); #endif
gpl-2.0
phenomx4/android_kernel_lge_m3s
arch/sh/boards/mach-microdev/irq.c
859
5407
/* * arch/sh/boards/superh/microdev/irq.c * * Copyright (C) 2003 Sean McGoogan (Sean.McGoogan@superh.com) * * SuperH SH4-202 MicroDev board support. * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <asm/system.h> #include <asm/io.h> #include <mach/microdev.h> #define NUM_EXTERNAL_IRQS 16 /* IRL0 .. IRL15 */ static const struct { unsigned char fpgaIrq; unsigned char mapped; const char *name; } fpgaIrqTable[NUM_EXTERNAL_IRQS] = { { 0, 0, "unused" }, /* IRQ #0 IRL=15 0x200 */ { MICRODEV_FPGA_IRQ_KEYBOARD, 1, "keyboard" }, /* IRQ #1 IRL=14 0x220 */ { MICRODEV_FPGA_IRQ_SERIAL1, 1, "Serial #1"}, /* IRQ #2 IRL=13 0x240 */ { MICRODEV_FPGA_IRQ_ETHERNET, 1, "Ethernet" }, /* IRQ #3 IRL=12 0x260 */ { MICRODEV_FPGA_IRQ_SERIAL2, 0, "Serial #2"}, /* IRQ #4 IRL=11 0x280 */ { 0, 0, "unused" }, /* IRQ #5 IRL=10 0x2a0 */ { 0, 0, "unused" }, /* IRQ #6 IRL=9 0x2c0 */ { MICRODEV_FPGA_IRQ_USB_HC, 1, "USB" }, /* IRQ #7 IRL=8 0x2e0 */ { MICRODEV_IRQ_PCI_INTA, 1, "PCI INTA" }, /* IRQ #8 IRL=7 0x300 */ { MICRODEV_IRQ_PCI_INTB, 1, "PCI INTB" }, /* IRQ #9 IRL=6 0x320 */ { MICRODEV_IRQ_PCI_INTC, 1, "PCI INTC" }, /* IRQ #10 IRL=5 0x340 */ { MICRODEV_IRQ_PCI_INTD, 1, "PCI INTD" }, /* IRQ #11 IRL=4 0x360 */ { MICRODEV_FPGA_IRQ_MOUSE, 1, "mouse" }, /* IRQ #12 IRL=3 0x380 */ { MICRODEV_FPGA_IRQ_IDE2, 1, "IDE #2" }, /* IRQ #13 IRL=2 0x3a0 */ { MICRODEV_FPGA_IRQ_IDE1, 1, "IDE #1" }, /* IRQ #14 IRL=1 0x3c0 */ { 0, 0, "unused" }, /* IRQ #15 IRL=0 0x3e0 */ }; #if (MICRODEV_LINUX_IRQ_KEYBOARD != 1) # error Inconsistancy in defining the IRQ# for Keyboard! #endif #if (MICRODEV_LINUX_IRQ_ETHERNET != 3) # error Inconsistancy in defining the IRQ# for Ethernet! #endif #if (MICRODEV_LINUX_IRQ_USB_HC != 7) # error Inconsistancy in defining the IRQ# for USB! 
#endif #if (MICRODEV_LINUX_IRQ_MOUSE != 12) # error Inconsistancy in defining the IRQ# for PS/2 Mouse! #endif #if (MICRODEV_LINUX_IRQ_IDE2 != 13) # error Inconsistancy in defining the IRQ# for secondary IDE! #endif #if (MICRODEV_LINUX_IRQ_IDE1 != 14) # error Inconsistancy in defining the IRQ# for primary IDE! #endif static void enable_microdev_irq(unsigned int irq); static void disable_microdev_irq(unsigned int irq); static void mask_and_ack_microdev(unsigned int); static struct irq_chip microdev_irq_type = { .name = "MicroDev-IRQ", .unmask = enable_microdev_irq, .mask = disable_microdev_irq, .ack = mask_and_ack_microdev, }; static void disable_microdev_irq(unsigned int irq) { unsigned int fpgaIrq; if (irq >= NUM_EXTERNAL_IRQS) return; if (!fpgaIrqTable[irq].mapped) return; fpgaIrq = fpgaIrqTable[irq].fpgaIrq; /* disable interrupts on the FPGA INTC register */ __raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTDSB_REG); } static void enable_microdev_irq(unsigned int irq) { unsigned long priorityReg, priorities, pri; unsigned int fpgaIrq; if (unlikely(irq >= NUM_EXTERNAL_IRQS)) return; if (unlikely(!fpgaIrqTable[irq].mapped)) return; pri = 15 - irq; fpgaIrq = fpgaIrqTable[irq].fpgaIrq; priorityReg = MICRODEV_FPGA_INTPRI_REG(fpgaIrq); /* set priority for the interrupt */ priorities = __raw_readl(priorityReg); priorities &= ~MICRODEV_FPGA_INTPRI_MASK(fpgaIrq); priorities |= MICRODEV_FPGA_INTPRI_LEVEL(fpgaIrq, pri); __raw_writel(priorities, priorityReg); /* enable interrupts on the FPGA INTC register */ __raw_writel(MICRODEV_FPGA_INTC_MASK(fpgaIrq), MICRODEV_FPGA_INTENB_REG); } /* This function sets the desired irq handler to be a MicroDev type */ static void __init make_microdev_irq(unsigned int irq) { disable_irq_nosync(irq); set_irq_chip_and_handler(irq, &microdev_irq_type, handle_level_irq); disable_microdev_irq(irq); } static void mask_and_ack_microdev(unsigned int irq) { disable_microdev_irq(irq); } extern void __init init_microdev_irq(void) { int i; 
/* disable interrupts on the FPGA INTC register */ __raw_writel(~0ul, MICRODEV_FPGA_INTDSB_REG); for (i = 0; i < NUM_EXTERNAL_IRQS; i++) make_microdev_irq(i); } extern void microdev_print_fpga_intc_status(void) { volatile unsigned int * const intenb = (unsigned int*)MICRODEV_FPGA_INTENB_REG; volatile unsigned int * const intdsb = (unsigned int*)MICRODEV_FPGA_INTDSB_REG; volatile unsigned int * const intpria = (unsigned int*)MICRODEV_FPGA_INTPRI_REG(0); volatile unsigned int * const intprib = (unsigned int*)MICRODEV_FPGA_INTPRI_REG(8); volatile unsigned int * const intpric = (unsigned int*)MICRODEV_FPGA_INTPRI_REG(16); volatile unsigned int * const intprid = (unsigned int*)MICRODEV_FPGA_INTPRI_REG(24); volatile unsigned int * const intsrc = (unsigned int*)MICRODEV_FPGA_INTSRC_REG; volatile unsigned int * const intreq = (unsigned int*)MICRODEV_FPGA_INTREQ_REG; printk("-------------------------- microdev_print_fpga_intc_status() ------------------\n"); printk("FPGA_INTENB = 0x%08x\n", *intenb); printk("FPGA_INTDSB = 0x%08x\n", *intdsb); printk("FPGA_INTSRC = 0x%08x\n", *intsrc); printk("FPGA_INTREQ = 0x%08x\n", *intreq); printk("FPGA_INTPRI[3..0] = %08x:%08x:%08x:%08x\n", *intprid, *intpric, *intprib, *intpria); printk("-------------------------------------------------------------------------------\n"); }
gpl-2.0
savoca/ifc6540
drivers/crypto/nx/nx-aes-gcm.c
2139
9530
/** * AES GCM routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2012 International Business Machines Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 only. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Author: Kent Yoder <yoder1@us.ibm.com> */ #include <crypto/internal/aead.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/types.h> #include <linux/crypto.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" static int gcm_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; nx_ctx_init(nx_ctx, HCOP_FC_AES); switch (key_len) { case AES_KEYSIZE_128: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; break; case AES_KEYSIZE_192: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; break; case AES_KEYSIZE_256: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; break; default: return -EINVAL; } csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len); 
csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA; memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len); return 0; } static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); char *nonce = nx_ctx->priv.gcm.nonce; int rc; if (key_len < 4) return -EINVAL; key_len -= 4; rc = gcm_aes_nx_set_key(tfm, in_key, key_len); if (rc) goto out; memcpy(nonce, in_key + key_len, 4); out: return rc; } static int gcm_aes_nx_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { if (authsize > crypto_aead_alg(tfm)->maxauthsize) return -EINVAL; crypto_aead_crt(tfm)->authsize = authsize; return 0; } static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } crypto_aead_crt(tfm)->authsize = authsize; return 0; } static int nx_gca(struct nx_crypto_ctx *nx_ctx, struct aead_request *req, u8 *out) { struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; int rc = -EINVAL; struct scatter_walk walk; struct nx_sg *nx_sg = nx_ctx->in_sg; if (req->assoclen > nx_ctx->ap->databytelen) goto out; if (req->assoclen <= AES_BLOCK_SIZE) { scatterwalk_start(&walk, req->assoc); scatterwalk_copychunks(out, &walk, req->assoclen, SCATTERWALK_FROM_SG); scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); rc = 0; goto out; } nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0, req->assoclen); nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg); rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); out: return rc; } static int gcm_aes_nx_crypt(struct aead_request *req, int enc) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_csbcpb *csbcpb = 
nx_ctx->csbcpb; struct blkcipher_desc desc; unsigned int nbytes = req->cryptlen; int rc = -EINVAL; if (nbytes > nx_ctx->ap->databytelen) goto out; desc.info = nx_ctx->priv.gcm.iv; /* initialize the counter */ *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; /* For scenarios where the input message is zero length, AES CTR mode * may be used. Set the source data to be a single block (16B) of all * zeros, and set the input IV value to be the same as the GMAC IV * value. - nx_wb 4.8.1.3 */ if (nbytes == 0) { char src[AES_BLOCK_SIZE] = {}; struct scatterlist sg; desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0); if (IS_ERR(desc.tfm)) { rc = -ENOMEM; goto out; } crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key, NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 : NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32); sg_init_one(&sg, src, AES_BLOCK_SIZE); if (enc) crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg, AES_BLOCK_SIZE); else crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg, AES_BLOCK_SIZE); crypto_free_blkcipher(desc.tfm); rc = 0; goto out; } desc.tfm = (struct crypto_blkcipher *)req->base.tfm; csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; if (req->assoclen) { rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); if (rc) goto out; } if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes, csbcpb->cpb.aes_gcm.iv_or_cnt); if (rc) goto out; rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(csbcpb->csb.processed_byte_count, &(nx_ctx->stats->aes_bytes)); if (enc) { /* copy out the auth tag */ scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac, req->dst, nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_TO_SG); } else if (req->assoclen) { u8 *itag = 
nx_ctx->priv.gcm.iauth_tag; u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; scatterwalk_map_and_copy(itag, req->dst, nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_FROM_SG); rc = memcmp(itag, otag, crypto_aead_authsize(crypto_aead_reqtfm(req))) ? -EBADMSG : 0; } out: return rc; } static int gcm_aes_nx_encrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); char *iv = nx_ctx->priv.gcm.iv; memcpy(iv, req->iv, 12); return gcm_aes_nx_crypt(req, 1); } static int gcm_aes_nx_decrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); char *iv = nx_ctx->priv.gcm.iv; memcpy(iv, req->iv, 12); return gcm_aes_nx_crypt(req, 0); } static int gcm4106_aes_nx_encrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); char *iv = nx_ctx->priv.gcm.iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); return gcm_aes_nx_crypt(req, 1); } static int gcm4106_aes_nx_decrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); char *iv = nx_ctx->priv.gcm.iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); return gcm_aes_nx_crypt(req, 0); } /* tell the block cipher walk routines that this is a stream cipher by * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block * during encrypt/decrypt doesn't solve this problem, because it calls * blkcipher_walk_done under the covers, which doesn't use walk->blocksize, * but instead uses this tfm->blocksize. 
*/ struct crypto_alg nx_gcm_aes_alg = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-nx", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AEAD, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, .cra_init = nx_crypto_ctx_aes_gcm_init, .cra_exit = nx_crypto_ctx_exit, .cra_aead = { .ivsize = AES_BLOCK_SIZE, .maxauthsize = AES_BLOCK_SIZE, .setkey = gcm_aes_nx_set_key, .setauthsize = gcm_aes_nx_setauthsize, .encrypt = gcm_aes_nx_encrypt, .decrypt = gcm_aes_nx_decrypt, } }; struct crypto_alg nx_gcm4106_aes_alg = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aes-nx", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AEAD, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_type = &crypto_nivaead_type, .cra_module = THIS_MODULE, .cra_init = nx_crypto_ctx_aes_gcm_init, .cra_exit = nx_crypto_ctx_exit, .cra_aead = { .ivsize = 8, .maxauthsize = AES_BLOCK_SIZE, .geniv = "seqiv", .setkey = gcm4106_aes_nx_set_key, .setauthsize = gcm4106_aes_nx_setauthsize, .encrypt = gcm4106_aes_nx_encrypt, .decrypt = gcm4106_aes_nx_decrypt, } };
gpl-2.0
run/perf-power7
drivers/usb/phy/phy-mv-usb.c
2139
21184
/* * Copyright (C) 2011 Marvell International Ltd. All rights reserved. * Author: Chao Xie <chao.xie@marvell.com> * Neil Zhang <zhangwm@marvell.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/device.h> #include <linux/proc_fs.h> #include <linux/clk.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/usb.h> #include <linux/usb/ch9.h> #include <linux/usb/otg.h> #include <linux/usb/gadget.h> #include <linux/usb/hcd.h> #include <linux/platform_data/mv_usb.h> #include "phy-mv-usb.h" #define DRIVER_DESC "Marvell USB OTG transceiver driver" #define DRIVER_VERSION "Jan 20, 2010" MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); static const char driver_name[] = "mv-otg"; static char *state_string[] = { "undefined", "b_idle", "b_srp_init", "b_peripheral", "b_wait_acon", "b_host", "a_idle", "a_wait_vrise", "a_wait_bcon", "a_host", "a_suspend", "a_peripheral", "a_wait_vfall", "a_vbus_err" }; static int mv_otg_set_vbus(struct usb_otg *otg, bool on) { struct mv_otg *mvotg = container_of(otg->phy, struct mv_otg, phy); if (mvotg->pdata->set_vbus == NULL) return -ENODEV; return mvotg->pdata->set_vbus(on); } static int mv_otg_set_host(struct usb_otg *otg, struct usb_bus *host) { otg->host = host; return 0; } static int mv_otg_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget) { otg->gadget = gadget; return 0; } static void mv_otg_run_state_machine(struct mv_otg *mvotg, unsigned long delay) { dev_dbg(&mvotg->pdev->dev, "transceiver is updated\n"); if (!mvotg->qwork) return; queue_delayed_work(mvotg->qwork, &mvotg->work, delay); } static void 
mv_otg_timer_await_bcon(unsigned long data) { struct mv_otg *mvotg = (struct mv_otg *) data; mvotg->otg_ctrl.a_wait_bcon_timeout = 1; dev_info(&mvotg->pdev->dev, "B Device No Response!\n"); if (spin_trylock(&mvotg->wq_lock)) { mv_otg_run_state_machine(mvotg, 0); spin_unlock(&mvotg->wq_lock); } } static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id) { struct timer_list *timer; if (id >= OTG_TIMER_NUM) return -EINVAL; timer = &mvotg->otg_ctrl.timer[id]; if (timer_pending(timer)) del_timer(timer); return 0; } static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id, unsigned long interval, void (*callback) (unsigned long)) { struct timer_list *timer; if (id >= OTG_TIMER_NUM) return -EINVAL; timer = &mvotg->otg_ctrl.timer[id]; if (timer_pending(timer)) { dev_err(&mvotg->pdev->dev, "Timer%d is already running\n", id); return -EBUSY; } init_timer(timer); timer->data = (unsigned long) mvotg; timer->function = callback; timer->expires = jiffies + interval; add_timer(timer); return 0; } static int mv_otg_reset(struct mv_otg *mvotg) { unsigned int loops; u32 tmp; /* Stop the controller */ tmp = readl(&mvotg->op_regs->usbcmd); tmp &= ~USBCMD_RUN_STOP; writel(tmp, &mvotg->op_regs->usbcmd); /* Reset the controller to get default values */ writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd); loops = 500; while (readl(&mvotg->op_regs->usbcmd) & USBCMD_CTRL_RESET) { if (loops == 0) { dev_err(&mvotg->pdev->dev, "Wait for RESET completed TIMEOUT\n"); return -ETIMEDOUT; } loops--; udelay(20); } writel(0x0, &mvotg->op_regs->usbintr); tmp = readl(&mvotg->op_regs->usbsts); writel(tmp, &mvotg->op_regs->usbsts); return 0; } static void mv_otg_init_irq(struct mv_otg *mvotg) { u32 otgsc; mvotg->irq_en = OTGSC_INTR_A_SESSION_VALID | OTGSC_INTR_A_VBUS_VALID; mvotg->irq_status = OTGSC_INTSTS_A_SESSION_VALID | OTGSC_INTSTS_A_VBUS_VALID; if (mvotg->pdata->vbus == NULL) { mvotg->irq_en |= OTGSC_INTR_B_SESSION_VALID | OTGSC_INTR_B_SESSION_END; mvotg->irq_status |= 
OTGSC_INTSTS_B_SESSION_VALID | OTGSC_INTSTS_B_SESSION_END; } if (mvotg->pdata->id == NULL) { mvotg->irq_en |= OTGSC_INTR_USB_ID; mvotg->irq_status |= OTGSC_INTSTS_USB_ID; } otgsc = readl(&mvotg->op_regs->otgsc); otgsc |= mvotg->irq_en; writel(otgsc, &mvotg->op_regs->otgsc); } static void mv_otg_start_host(struct mv_otg *mvotg, int on) { #ifdef CONFIG_USB struct usb_otg *otg = mvotg->phy.otg; struct usb_hcd *hcd; if (!otg->host) return; dev_info(&mvotg->pdev->dev, "%s host\n", on ? "start" : "stop"); hcd = bus_to_hcd(otg->host); if (on) usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); else usb_remove_hcd(hcd); #endif /* CONFIG_USB */ } static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on) { struct usb_otg *otg = mvotg->phy.otg; if (!otg->gadget) return; dev_info(mvotg->phy.dev, "gadget %s\n", on ? "on" : "off"); if (on) usb_gadget_vbus_connect(otg->gadget); else usb_gadget_vbus_disconnect(otg->gadget); } static void otg_clock_enable(struct mv_otg *mvotg) { clk_prepare_enable(mvotg->clk); } static void otg_clock_disable(struct mv_otg *mvotg) { clk_disable_unprepare(mvotg->clk); } static int mv_otg_enable_internal(struct mv_otg *mvotg) { int retval = 0; if (mvotg->active) return 0; dev_dbg(&mvotg->pdev->dev, "otg enabled\n"); otg_clock_enable(mvotg); if (mvotg->pdata->phy_init) { retval = mvotg->pdata->phy_init(mvotg->phy_regs); if (retval) { dev_err(&mvotg->pdev->dev, "init phy error %d\n", retval); otg_clock_disable(mvotg); return retval; } } mvotg->active = 1; return 0; } static int mv_otg_enable(struct mv_otg *mvotg) { if (mvotg->clock_gating) return mv_otg_enable_internal(mvotg); return 0; } static void mv_otg_disable_internal(struct mv_otg *mvotg) { if (mvotg->active) { dev_dbg(&mvotg->pdev->dev, "otg disabled\n"); if (mvotg->pdata->phy_deinit) mvotg->pdata->phy_deinit(mvotg->phy_regs); otg_clock_disable(mvotg); mvotg->active = 0; } } static void mv_otg_disable(struct mv_otg *mvotg) { if (mvotg->clock_gating) mv_otg_disable_internal(mvotg); } static void 
mv_otg_update_inputs(struct mv_otg *mvotg) { struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl; u32 otgsc; otgsc = readl(&mvotg->op_regs->otgsc); if (mvotg->pdata->vbus) { if (mvotg->pdata->vbus->poll() == VBUS_HIGH) { otg_ctrl->b_sess_vld = 1; otg_ctrl->b_sess_end = 0; } else { otg_ctrl->b_sess_vld = 0; otg_ctrl->b_sess_end = 1; } } else { otg_ctrl->b_sess_vld = !!(otgsc & OTGSC_STS_B_SESSION_VALID); otg_ctrl->b_sess_end = !!(otgsc & OTGSC_STS_B_SESSION_END); } if (mvotg->pdata->id) otg_ctrl->id = !!mvotg->pdata->id->poll(); else otg_ctrl->id = !!(otgsc & OTGSC_STS_USB_ID); if (mvotg->pdata->otg_force_a_bus_req && !otg_ctrl->id) otg_ctrl->a_bus_req = 1; otg_ctrl->a_sess_vld = !!(otgsc & OTGSC_STS_A_SESSION_VALID); otg_ctrl->a_vbus_vld = !!(otgsc & OTGSC_STS_A_VBUS_VALID); dev_dbg(&mvotg->pdev->dev, "%s: ", __func__); dev_dbg(&mvotg->pdev->dev, "id %d\n", otg_ctrl->id); dev_dbg(&mvotg->pdev->dev, "b_sess_vld %d\n", otg_ctrl->b_sess_vld); dev_dbg(&mvotg->pdev->dev, "b_sess_end %d\n", otg_ctrl->b_sess_end); dev_dbg(&mvotg->pdev->dev, "a_vbus_vld %d\n", otg_ctrl->a_vbus_vld); dev_dbg(&mvotg->pdev->dev, "a_sess_vld %d\n", otg_ctrl->a_sess_vld); } static void mv_otg_update_state(struct mv_otg *mvotg) { struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl; struct usb_phy *phy = &mvotg->phy; int old_state = phy->state; switch (old_state) { case OTG_STATE_UNDEFINED: phy->state = OTG_STATE_B_IDLE; /* FALL THROUGH */ case OTG_STATE_B_IDLE: if (otg_ctrl->id == 0) phy->state = OTG_STATE_A_IDLE; else if (otg_ctrl->b_sess_vld) phy->state = OTG_STATE_B_PERIPHERAL; break; case OTG_STATE_B_PERIPHERAL: if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0) phy->state = OTG_STATE_B_IDLE; break; case OTG_STATE_A_IDLE: if (otg_ctrl->id) phy->state = OTG_STATE_B_IDLE; else if (!(otg_ctrl->a_bus_drop) && (otg_ctrl->a_bus_req || otg_ctrl->a_srp_det)) phy->state = OTG_STATE_A_WAIT_VRISE; break; case OTG_STATE_A_WAIT_VRISE: if (otg_ctrl->a_vbus_vld) phy->state = OTG_STATE_A_WAIT_BCON; break; case 
OTG_STATE_A_WAIT_BCON: if (otg_ctrl->id || otg_ctrl->a_bus_drop || otg_ctrl->a_wait_bcon_timeout) { mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER); mvotg->otg_ctrl.a_wait_bcon_timeout = 0; phy->state = OTG_STATE_A_WAIT_VFALL; otg_ctrl->a_bus_req = 0; } else if (!otg_ctrl->a_vbus_vld) { mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER); mvotg->otg_ctrl.a_wait_bcon_timeout = 0; phy->state = OTG_STATE_A_VBUS_ERR; } else if (otg_ctrl->b_conn) { mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER); mvotg->otg_ctrl.a_wait_bcon_timeout = 0; phy->state = OTG_STATE_A_HOST; } break; case OTG_STATE_A_HOST: if (otg_ctrl->id || !otg_ctrl->b_conn || otg_ctrl->a_bus_drop) phy->state = OTG_STATE_A_WAIT_BCON; else if (!otg_ctrl->a_vbus_vld) phy->state = OTG_STATE_A_VBUS_ERR; break; case OTG_STATE_A_WAIT_VFALL: if (otg_ctrl->id || (!otg_ctrl->b_conn && otg_ctrl->a_sess_vld) || otg_ctrl->a_bus_req) phy->state = OTG_STATE_A_IDLE; break; case OTG_STATE_A_VBUS_ERR: if (otg_ctrl->id || otg_ctrl->a_clr_err || otg_ctrl->a_bus_drop) { otg_ctrl->a_clr_err = 0; phy->state = OTG_STATE_A_WAIT_VFALL; } break; default: break; } } static void mv_otg_work(struct work_struct *work) { struct mv_otg *mvotg; struct usb_phy *phy; struct usb_otg *otg; int old_state; mvotg = container_of(to_delayed_work(work), struct mv_otg, work); run: /* work queue is single thread, or we need spin_lock to protect */ phy = &mvotg->phy; otg = phy->otg; old_state = phy->state; if (!mvotg->active) return; mv_otg_update_inputs(mvotg); mv_otg_update_state(mvotg); if (old_state != phy->state) { dev_info(&mvotg->pdev->dev, "change from state %s to %s\n", state_string[old_state], state_string[phy->state]); switch (phy->state) { case OTG_STATE_B_IDLE: otg->default_a = 0; if (old_state == OTG_STATE_B_PERIPHERAL) mv_otg_start_periphrals(mvotg, 0); mv_otg_reset(mvotg); mv_otg_disable(mvotg); break; case OTG_STATE_B_PERIPHERAL: mv_otg_enable(mvotg); mv_otg_start_periphrals(mvotg, 1); break; case OTG_STATE_A_IDLE: otg->default_a = 1; 
mv_otg_enable(mvotg); if (old_state == OTG_STATE_A_WAIT_VFALL) mv_otg_start_host(mvotg, 0); mv_otg_reset(mvotg); break; case OTG_STATE_A_WAIT_VRISE: mv_otg_set_vbus(otg, 1); break; case OTG_STATE_A_WAIT_BCON: if (old_state != OTG_STATE_A_HOST) mv_otg_start_host(mvotg, 1); mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER, T_A_WAIT_BCON, mv_otg_timer_await_bcon); /* * Now, we directly enter A_HOST. So set b_conn = 1 * here. In fact, it need host driver to notify us. */ mvotg->otg_ctrl.b_conn = 1; break; case OTG_STATE_A_HOST: break; case OTG_STATE_A_WAIT_VFALL: /* * Now, we has exited A_HOST. So set b_conn = 0 * here. In fact, it need host driver to notify us. */ mvotg->otg_ctrl.b_conn = 0; mv_otg_set_vbus(otg, 0); break; case OTG_STATE_A_VBUS_ERR: break; default: break; } goto run; } } static irqreturn_t mv_otg_irq(int irq, void *dev) { struct mv_otg *mvotg = dev; u32 otgsc; otgsc = readl(&mvotg->op_regs->otgsc); writel(otgsc, &mvotg->op_regs->otgsc); /* * if we have vbus, then the vbus detection for B-device * will be done by mv_otg_inputs_irq(). 
*/ if (mvotg->pdata->vbus) if ((otgsc & OTGSC_STS_USB_ID) && !(otgsc & OTGSC_INTSTS_USB_ID)) return IRQ_NONE; if ((otgsc & mvotg->irq_status) == 0) return IRQ_NONE; mv_otg_run_state_machine(mvotg, 0); return IRQ_HANDLED; } static irqreturn_t mv_otg_inputs_irq(int irq, void *dev) { struct mv_otg *mvotg = dev; /* The clock may disabled at this time */ if (!mvotg->active) { mv_otg_enable(mvotg); mv_otg_init_irq(mvotg); } mv_otg_run_state_machine(mvotg, 0); return IRQ_HANDLED; } static ssize_t get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf) { struct mv_otg *mvotg = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", mvotg->otg_ctrl.a_bus_req); } static ssize_t set_a_bus_req(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mv_otg *mvotg = dev_get_drvdata(dev); if (count > 2) return -1; /* We will use this interface to change to A device */ if (mvotg->phy.state != OTG_STATE_B_IDLE && mvotg->phy.state != OTG_STATE_A_IDLE) return -1; /* The clock may disabled and we need to set irq for ID detected */ mv_otg_enable(mvotg); mv_otg_init_irq(mvotg); if (buf[0] == '1') { mvotg->otg_ctrl.a_bus_req = 1; mvotg->otg_ctrl.a_bus_drop = 0; dev_dbg(&mvotg->pdev->dev, "User request: a_bus_req = 1\n"); if (spin_trylock(&mvotg->wq_lock)) { mv_otg_run_state_machine(mvotg, 0); spin_unlock(&mvotg->wq_lock); } } return count; } static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req); static ssize_t set_a_clr_err(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mv_otg *mvotg = dev_get_drvdata(dev); if (!mvotg->phy.otg->default_a) return -1; if (count > 2) return -1; if (buf[0] == '1') { mvotg->otg_ctrl.a_clr_err = 1; dev_dbg(&mvotg->pdev->dev, "User request: a_clr_err = 1\n"); } if (spin_trylock(&mvotg->wq_lock)) { mv_otg_run_state_machine(mvotg, 0); spin_unlock(&mvotg->wq_lock); } return count; } static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, 
set_a_clr_err); static ssize_t get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf) { struct mv_otg *mvotg = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d\n", mvotg->otg_ctrl.a_bus_drop); } static ssize_t set_a_bus_drop(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct mv_otg *mvotg = dev_get_drvdata(dev); if (!mvotg->phy.otg->default_a) return -1; if (count > 2) return -1; if (buf[0] == '0') { mvotg->otg_ctrl.a_bus_drop = 0; dev_dbg(&mvotg->pdev->dev, "User request: a_bus_drop = 0\n"); } else if (buf[0] == '1') { mvotg->otg_ctrl.a_bus_drop = 1; mvotg->otg_ctrl.a_bus_req = 0; dev_dbg(&mvotg->pdev->dev, "User request: a_bus_drop = 1\n"); dev_dbg(&mvotg->pdev->dev, "User request: and a_bus_req = 0\n"); } if (spin_trylock(&mvotg->wq_lock)) { mv_otg_run_state_machine(mvotg, 0); spin_unlock(&mvotg->wq_lock); } return count; } static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop, set_a_bus_drop); static struct attribute *inputs_attrs[] = { &dev_attr_a_bus_req.attr, &dev_attr_a_clr_err.attr, &dev_attr_a_bus_drop.attr, NULL, }; static struct attribute_group inputs_attr_group = { .name = "inputs", .attrs = inputs_attrs, }; int mv_otg_remove(struct platform_device *pdev) { struct mv_otg *mvotg = platform_get_drvdata(pdev); sysfs_remove_group(&mvotg->pdev->dev.kobj, &inputs_attr_group); if (mvotg->qwork) { flush_workqueue(mvotg->qwork); destroy_workqueue(mvotg->qwork); } mv_otg_disable(mvotg); usb_remove_phy(&mvotg->phy); return 0; } static int mv_otg_probe(struct platform_device *pdev) { struct mv_usb_platform_data *pdata = pdev->dev.platform_data; struct mv_otg *mvotg; struct usb_otg *otg; struct resource *r; int retval = 0, i; if (pdata == NULL) { dev_err(&pdev->dev, "failed to get platform data\n"); return -ENODEV; } mvotg = devm_kzalloc(&pdev->dev, sizeof(*mvotg), GFP_KERNEL); if (!mvotg) { dev_err(&pdev->dev, "failed to allocate memory!\n"); return -ENOMEM; } otg = 
devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL); if (!otg) return -ENOMEM; platform_set_drvdata(pdev, mvotg); mvotg->pdev = pdev; mvotg->pdata = pdata; mvotg->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(mvotg->clk)) return PTR_ERR(mvotg->clk); mvotg->qwork = create_singlethread_workqueue("mv_otg_queue"); if (!mvotg->qwork) { dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n"); return -ENOMEM; } INIT_DELAYED_WORK(&mvotg->work, mv_otg_work); /* OTG common part */ mvotg->pdev = pdev; mvotg->phy.dev = &pdev->dev; mvotg->phy.otg = otg; mvotg->phy.label = driver_name; mvotg->phy.state = OTG_STATE_UNDEFINED; otg->phy = &mvotg->phy; otg->set_host = mv_otg_set_host; otg->set_peripheral = mv_otg_set_peripheral; otg->set_vbus = mv_otg_set_vbus; for (i = 0; i < OTG_TIMER_NUM; i++) init_timer(&mvotg->otg_ctrl.timer[i]); r = platform_get_resource_byname(mvotg->pdev, IORESOURCE_MEM, "phyregs"); if (r == NULL) { dev_err(&pdev->dev, "no phy I/O memory resource defined\n"); retval = -ENODEV; goto err_destroy_workqueue; } mvotg->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (mvotg->phy_regs == NULL) { dev_err(&pdev->dev, "failed to map phy I/O memory\n"); retval = -EFAULT; goto err_destroy_workqueue; } r = platform_get_resource_byname(mvotg->pdev, IORESOURCE_MEM, "capregs"); if (r == NULL) { dev_err(&pdev->dev, "no I/O memory resource defined\n"); retval = -ENODEV; goto err_destroy_workqueue; } mvotg->cap_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (mvotg->cap_regs == NULL) { dev_err(&pdev->dev, "failed to map I/O memory\n"); retval = -EFAULT; goto err_destroy_workqueue; } /* we will acces controller register, so enable the udc controller */ retval = mv_otg_enable_internal(mvotg); if (retval) { dev_err(&pdev->dev, "mv otg enable error %d\n", retval); goto err_destroy_workqueue; } mvotg->op_regs = (struct mv_otg_regs __iomem *) ((unsigned long) mvotg->cap_regs + (readl(mvotg->cap_regs) & CAPLENGTH_MASK)); if (pdata->id) { retval = 
devm_request_threaded_irq(&pdev->dev, pdata->id->irq, NULL, mv_otg_inputs_irq, IRQF_ONESHOT, "id", mvotg); if (retval) { dev_info(&pdev->dev, "Failed to request irq for ID\n"); pdata->id = NULL; } } if (pdata->vbus) { mvotg->clock_gating = 1; retval = devm_request_threaded_irq(&pdev->dev, pdata->vbus->irq, NULL, mv_otg_inputs_irq, IRQF_ONESHOT, "vbus", mvotg); if (retval) { dev_info(&pdev->dev, "Failed to request irq for VBUS, " "disable clock gating\n"); mvotg->clock_gating = 0; pdata->vbus = NULL; } } if (pdata->disable_otg_clock_gating) mvotg->clock_gating = 0; mv_otg_reset(mvotg); mv_otg_init_irq(mvotg); r = platform_get_resource(mvotg->pdev, IORESOURCE_IRQ, 0); if (r == NULL) { dev_err(&pdev->dev, "no IRQ resource defined\n"); retval = -ENODEV; goto err_disable_clk; } mvotg->irq = r->start; if (devm_request_irq(&pdev->dev, mvotg->irq, mv_otg_irq, IRQF_SHARED, driver_name, mvotg)) { dev_err(&pdev->dev, "Request irq %d for OTG failed\n", mvotg->irq); mvotg->irq = 0; retval = -ENODEV; goto err_disable_clk; } retval = usb_add_phy(&mvotg->phy, USB_PHY_TYPE_USB2); if (retval < 0) { dev_err(&pdev->dev, "can't register transceiver, %d\n", retval); goto err_disable_clk; } retval = sysfs_create_group(&pdev->dev.kobj, &inputs_attr_group); if (retval < 0) { dev_dbg(&pdev->dev, "Can't register sysfs attr group: %d\n", retval); goto err_remove_phy; } spin_lock_init(&mvotg->wq_lock); if (spin_trylock(&mvotg->wq_lock)) { mv_otg_run_state_machine(mvotg, 2 * HZ); spin_unlock(&mvotg->wq_lock); } dev_info(&pdev->dev, "successful probe OTG device %s clock gating.\n", mvotg->clock_gating ? 
"with" : "without"); return 0; err_remove_phy: usb_remove_phy(&mvotg->phy); err_disable_clk: mv_otg_disable_internal(mvotg); err_destroy_workqueue: flush_workqueue(mvotg->qwork); destroy_workqueue(mvotg->qwork); return retval; } #ifdef CONFIG_PM static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state) { struct mv_otg *mvotg = platform_get_drvdata(pdev); if (mvotg->phy.state != OTG_STATE_B_IDLE) { dev_info(&pdev->dev, "OTG state is not B_IDLE, it is %d!\n", mvotg->phy.state); return -EAGAIN; } if (!mvotg->clock_gating) mv_otg_disable_internal(mvotg); return 0; } static int mv_otg_resume(struct platform_device *pdev) { struct mv_otg *mvotg = platform_get_drvdata(pdev); u32 otgsc; if (!mvotg->clock_gating) { mv_otg_enable_internal(mvotg); otgsc = readl(&mvotg->op_regs->otgsc); otgsc |= mvotg->irq_en; writel(otgsc, &mvotg->op_regs->otgsc); if (spin_trylock(&mvotg->wq_lock)) { mv_otg_run_state_machine(mvotg, 0); spin_unlock(&mvotg->wq_lock); } } return 0; } #endif static struct platform_driver mv_otg_driver = { .probe = mv_otg_probe, .remove = __exit_p(mv_otg_remove), .driver = { .owner = THIS_MODULE, .name = driver_name, }, #ifdef CONFIG_PM .suspend = mv_otg_suspend, .resume = mv_otg_resume, #endif }; module_platform_driver(mv_otg_driver);
gpl-2.0
AudioGod/Gods-Kernel-Huawei-Angler
drivers/net/wireless/ath/ath9k/calib.c
2139
11765
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include <linux/export.h>

/* Common calibration code */

/*
 * Return the median of the ATH9K_NF_CAL_HIST_MAX noise-floor samples in
 * nfCalBuffer.  The buffer is copied and sorted (descending, simple
 * bubble sort — the history is tiny, so O(n^2) is fine here) so the
 * caller's history buffer is left untouched.
 */
static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
{
	int16_t nfval;
	int16_t sort[ATH9K_NF_CAL_HIST_MAX];
	int i, j;

	/* Work on a local copy so the live history is not reordered. */
	for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
		sort[i] = nfCalBuffer[i];

	for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
		for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
			if (sort[j] > sort[j - 1]) {
				nfval = sort[j];
				sort[j] = sort[j - 1];
				sort[j - 1] = nfval;
			}
		}
	}
	/* Middle element of the sorted copy is the median. */
	nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];

	return nfval;
}

/*
 * Select the per-band noise-floor limit structure for the given channel.
 * A NULL channel falls back to the 2 GHz limits.
 */
static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
						    struct ath9k_channel *chan)
{
	struct ath_nf_limits *limit;

	if (!chan || IS_CHAN_2GHZ(chan))
		limit = &ah->nf_2g;
	else
		limit = &ah->nf_5g;

	return limit;
}

/* Nominal (expected) noise floor for the channel's band, in dBm. */
static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
				   struct ath9k_channel *chan)
{
	return ath9k_hw_get_nf_limits(ah, chan)->nominal;
}

/*
 * Compute the channel noise value reported to mac80211.  Starts from
 * ATH_DEFAULT_NOISE_FLOOR and, when a measured noise floor is available,
 * adds only the positive excess of the measured value over
 * (nominal NF + ATH9K_NF_CAL_NOISE_THRESH) — i.e. the reported noise
 * never goes below the default.
 */
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
{
	s8 noise = ATH_DEFAULT_NOISE_FLOOR;

	if (chan && chan->noisefloor) {
		s8 delta = chan->noisefloor - ATH9K_NF_CAL_NOISE_THRESH -
			   ath9k_hw_get_default_nf(ah, chan);
		if (delta > 0)
			noise += delta;
	}
	return noise;
}
EXPORT_SYMBOL(ath9k_hw_getchan_noise);

/*
 * Fold a fresh set of per-chain NF readings (nfarray) into the calibration
 * history buffers and recompute each chain's filtered NF (privNF).
 * While invalidNFcount is still draining, the raw reading is used directly;
 * afterwards the median of the history is used.  Values above the per-band
 * maximum are clamped unless interference has been flagged.
 */
static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
					      struct ath9k_hw_cal_data *cal,
					      int16_t *nfarray)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_nf_limits *limit;
	struct ath9k_nfcal_hist *h;
	bool high_nf_mid = false;
	/* Upper nibble covers the extension-channel readings of each chain. */
	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
	int i;

	h = cal->nfCalHist;
	limit = ath9k_hw_get_nf_limits(ah, ah->curchan);

	for (i = 0; i < NUM_NF_READINGS; i++) {
		/* Skip disabled chains; ext-channel slots only exist in HT40. */
		if (!(chainmask & (1 << i)) ||
		    ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(ah->curchan)))
			continue;

		h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];

		if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
			h[i].currIndex = 0;

		if (h[i].invalidNFcount > 0) {
			h[i].invalidNFcount--;
			h[i].privNF = nfarray[i];
		} else {
			h[i].privNF =
				ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
		}

		if (!h[i].privNF)
			continue;

		if (h[i].privNF > limit->max) {
			high_nf_mid = true;

			ath_dbg(common, CALIBRATE,
				"NFmid[%d] (%d) > MAX (%d), %s\n",
				i, h[i].privNF, limit->max,
				(cal->nfcal_interference ?
				 "not corrected (due to interference)" :
				 "correcting to MAX"));

			/*
			 * Normally we limit the average noise floor by the
			 * hardware specific maximum here. However if we have
			 * encountered stuck beacons because of interference,
			 * we bypass this limit here in order to better deal
			 * with our environment.
			 */
			if (!cal->nfcal_interference)
				h[i].privNF = limit->max;
		}
	}

	/*
	 * If the noise floor seems normal for all chains, assume that
	 * there is no significant interference in the environment anymore.
	 * Re-enable the enforcement of the NF maximum again.
	 */
	if (!high_nf_mid)
		cal->nfcal_interference = false;
}

/*
 * Fetch the EEPROM noise-floor threshold for the given band into *nft.
 * Returns true on success; an unknown band hits BUG_ON.
 */
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
				   enum ieee80211_band band,
				   int16_t *nft)
{
	switch (band) {
	case IEEE80211_BAND_5GHZ:
		*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
		break;
	case IEEE80211_BAND_2GHZ:
		*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
		break;
	default:
		BUG_ON(1);
		return false;
	}

	return true;
}

/*
 * Kick off the given calibration: program the hardware, mark the cal as
 * running and clear all per-chain measurement accumulators.
 */
void ath9k_hw_reset_calibration(struct ath_hw *ah,
				struct ath9k_cal_list *currCal)
{
	int i;

	ath9k_hw_setup_calibration(ah, currCal);

	currCal->calState = CAL_RUNNING;

	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		ah->meas0.sign[i] = 0;
		ah->meas1.sign[i] = 0;
		ah->meas2.sign[i] = 0;
		ah->meas3.sign[i] = 0;
	}

	ah->cal_samples = 0;
}

/* This is done for the currently configured channel */
/*
 * Check whether the current calibration can be invalidated and restarted
 * for the configured channel.  Returns true ("cal valid, nothing to do")
 * unless a completed, supported calibration exists, in which case its
 * CalValid bit is cleared, it is re-queued (CAL_WAITING) and false is
 * returned.
 */
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	struct ath9k_cal_list *currCal = ah->cal_list_curr;

	if (!ah->caldata)
		return true;

	if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
		return true;

	if (currCal == NULL)
		return true;

	if (currCal->calState != CAL_DONE) {
		ath_dbg(common, CALIBRATE, "Calibration state incorrect, %d\n",
			currCal->calState);
		return true;
	}

	if (!(ah->supp_cals & currCal->calData->calType))
		return true;

	ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
		currCal->calData->calType, conf->chandef.chan->center_freq);

	/* Invalidate this cal type so it will be rerun on this channel. */
	ah->caldata->CalValid &= ~currCal->calData->calType;
	currCal->calState = CAL_WAITING;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_reset_calvalid);

/*
 * Start a hardware noise-floor calibration.  With 'update' set the
 * baseband is allowed to update its internal NF value; otherwise the
 * NO_UPDATE bit keeps the current value in place.
 */
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
{
	if (ah->caldata)
		ah->caldata->nfcal_pending = true;

	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);

	if (update)
		REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
			    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	else
		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
			    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);

	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}

/*
 * Load the software-filtered noise-floor values (or the band default when
 * no calibration history exists) into the baseband's per-chain NF
 * registers, trigger the load, wait for it to complete, then restore the
 * register cap to -50 so the next hardware cal is not limited by the
 * value just loaded.
 */
void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h = NULL;
	unsigned i, j;
	int32_t val;
	/* Upper nibble covers the extension-channel readings of each chain. */
	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	s16 default_nf = ath9k_hw_get_default_nf(ah, chan);

	if (ah->caldata)
		h = ah->caldata->nfCalHist;

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			s16 nfval;

			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
				continue;

			if (h)
				nfval = h[i].privNF;
			else
				nfval = default_nf;

			/* NF lives in a 9-bit field of the chain register. */
			val = REG_READ(ah, ah->nf_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) nfval << 1) & 0x1ff);
			REG_WRITE(ah, ah->nf_regs[i], val);
		}
	}

	/*
	 * Load software filtered NF value into baseband internal minCCApwr
	 * variable.
	 */
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);

	/*
	 * Wait for load to complete, should be fast, a few 10s of us.
	 * The max delay was changed from an original 250us to 10000us
	 * since 250us often results in NF load timeout and causes deaf
	 * condition during stress testing 12/12/2009
	 */
	for (j = 0; j < 10000; j++) {
		if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
		     AR_PHY_AGC_CONTROL_NF) == 0)
			break;
		udelay(10);
	}

	/*
	 * We timed out waiting for the noisefloor to load, probably due to an
	 * in-progress rx. Simply return here and allow the load plenty of time
	 * to complete before the next calibration interval.  We need to avoid
	 * trying to load -50 (which happens below) while the previous load is
	 * still in progress as this can cause rx deafness. Instead by returning
	 * here, the baseband nf cal will just be capped by our present
	 * noisefloor until the next calibration timer.
	 */
	if (j == 10000) {
		ath_dbg(common, ANY,
			"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
			REG_READ(ah, AR_PHY_AGC_CONTROL));
		return;
	}

	/*
	 * Restore maxCCAPower register parameter again so that we're not capped
	 * by the median we just loaded. This will be initial (and max) value
	 * of next noise floor calibration the baseband does.
	 */
	ENABLE_REGWRITE_BUFFER(ah);

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
				continue;

			val = REG_READ(ah, ah->nf_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (-50) << 1) & 0x1ff);
			REG_WRITE(ah, ah->nf_regs[i], val);
		}
	}

	REGWRITE_BUFFER_FLUSH(ah);
}

/*
 * Clamp raw per-chain NF readings into the band's [min, max] window:
 * readings above max are pulled down to max, readings below min are
 * replaced with the nominal value.  Zero entries mean "no reading" and
 * are left alone.
 */
static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_nf_limits *limit;
	int i;

	if (IS_CHAN_2GHZ(ah->curchan))
		limit = &ah->nf_2g;
	else
		limit = &ah->nf_5g;

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (!nf[i])
			continue;

		ath_dbg(common, CALIBRATE,
			"NF calibrated [%s] [chain %d] is %d\n",
			(i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);

		if (nf[i] > limit->max) {
			ath_dbg(common, CALIBRATE,
				"NF[%d] (%d) > MAX (%d), correcting to MAX\n",
				i, nf[i], limit->max);
			nf[i] = limit->max;
		} else if (nf[i] < limit->min) {
			ath_dbg(common, CALIBRATE,
				"NF[%d] (%d) < MIN (%d), correcting to NOM\n",
				i, nf[i], limit->min);
			nf[i] = limit->nominal;
		}
	}
}

/*
 * Read the completed hardware NF calibration results, sanitize them, fold
 * them into the history buffer and update the channel/ah noise values.
 * Returns false if the hardware cal has not finished yet or no caldata is
 * available (in which case only chan->noisefloor is set from chain 0).
 */
bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int16_t nf, nfThresh;
	int16_t nfarray[NUM_NF_READINGS] = { 0 };
	struct ath9k_nfcal_hist *h;
	struct ieee80211_channel *c = chan->chan;
	struct ath9k_hw_cal_data *caldata = ah->caldata;

	if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
		ath_dbg(common, CALIBRATE,
			"NF did not complete in calibration window\n");
		return false;
	}

	ath9k_hw_do_getnf(ah, nfarray);
	ath9k_hw_nf_sanitize(ah, nfarray);
	nf = nfarray[0];
	if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
	    && nf > nfThresh) {
		ath_dbg(common, CALIBRATE,
			"noise floor failed detected; detected %d, threshold %d\n",
			nf, nfThresh);
	}

	if (!caldata) {
		chan->noisefloor = nf;
		return false;
	}

	h = caldata->nfCalHist;
	caldata->nfcal_pending = false;
	ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
	chan->noisefloor = h[0].privNF;
	ah->noise = ath9k_hw_getchan_noise(ah, chan);
	return true;
}
EXPORT_SYMBOL(ath9k_hw_getnf);

/*
 * (Re)initialize the NF calibration history for a channel: every history
 * slot and privNF is preset to the band's default NF and the invalid
 * counter is armed so the first few raw readings are used directly.
 * NOTE(review): dereferences ah->caldata unconditionally — callers appear
 * to guarantee it is non-NULL here; confirm before reuse elsewhere.
 */
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
				  struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h;
	s16 default_nf;
	int i, j;

	ah->caldata->channel = chan->channel;
	ah->caldata->channelFlags = chan->channelFlags;
	ah->caldata->chanmode = chan->chanmode;
	h = ah->caldata->nfCalHist;
	default_nf = ath9k_hw_get_default_nf(ah, chan);
	for (i = 0; i < NUM_NF_READINGS; i++) {
		h[i].currIndex = 0;
		h[i].privNF = default_nf;
		h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
		for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
			h[i].nfCalBuffer[j] = default_nf;
		}
	}
}

/*
 * Stuck-beacon recovery hook: trigger (or harvest) an NF calibration and
 * flag interference so the NF maximum clamp is bypassed until readings
 * return to normal.
 */
void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
{
	struct ath9k_hw_cal_data *caldata = ah->caldata;

	if (unlikely(!caldata))
		return;

	/*
	 * If beacons are stuck, the most likely cause is interference.
	 * Triggering a noise floor calibration at this point helps the
	 * hardware adapt to a noisy environment much faster.
	 * To ensure that we recover from stuck beacons quickly, let
	 * the baseband update the internal NF value itself, similar to
	 * what is being done after a full reset.
	 */
	if (!caldata->nfcal_pending)
		ath9k_hw_start_nfcal(ah, true);
	else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
		ath9k_hw_getnf(ah, ah->curchan);

	caldata->nfcal_interference = true;
}
EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
gpl-2.0
drod2169/android_kernel_lge_bullhead
drivers/xen/xen-acpi-memhotplug.c
2139
12023
/*
 * Copyright (C) 2012 Intel Corporation
 *    Author: Liu Jinsong <jinsong.liu@intel.com>
 *    Author: Jiang Yunhong <yunhong.jiang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <acpi/acpi_drivers.h>
#include <xen/acpi.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>

#define PREFIX "ACPI:xen_memory_hotplug:"

/* One contiguous memory range parsed from a memory device's _CRS. */
struct acpi_memory_info {
	struct list_head list;
	u64 start_addr;		/* Memory Range start physical addr */
	u64 length;		/* Memory Range length */
	unsigned short caching;	/* memory cache attribute */
	unsigned short write_protect;	/* memory read/write attribute */
				/* copied from buffer getting from _CRS */
	unsigned int enabled:1;	/* range already hot-added to Xen */
};

/* Per-ACPI-device driver data: the device and its list of ranges. */
struct acpi_memory_device {
	struct acpi_device *device;
	struct list_head res_list;
};

/* Set once init completes; gates hot-add for devices found at boot. */
static bool acpi_hotmem_initialized __read_mostly;

/*
 * Ask the Xen hypervisor (via XENPF_mem_hotadd) to hot-add the physical
 * range described by 'info' into proximity domain 'pxm'.  Returns the
 * hypercall result (0 on success).
 */
static int xen_hotadd_memory(int pxm, struct acpi_memory_info *info)
{
	int rc;
	struct xen_platform_op op;

	op.cmd = XENPF_mem_hotadd;
	op.u.mem_add.spfn = info->start_addr >> PAGE_SHIFT;
	op.u.mem_add.epfn = (info->start_addr + info->length) >> PAGE_SHIFT;
	op.u.mem_add.pxm = pxm;

	rc = HYPERVISOR_dom0_op(&op);
	if (rc)
		pr_err(PREFIX "Xen Hotplug Memory Add failed on "
			"0x%lx -> 0x%lx, _PXM: %d, error: %d\n",
			(unsigned long)info->start_addr,
			(unsigned long)(info->start_addr + info->length),
			pxm, rc);

	return rc;
}

/*
 * Hot-add every not-yet-enabled, non-empty range of 'mem_device' to Xen.
 * Per-range failures are skipped (best effort); returns -ENODEV only when
 * no range at all ended up enabled, otherwise 0.
 */
static int xen_acpi_memory_enable_device(struct acpi_memory_device *mem_device)
{
	int pxm, result;
	int num_enabled = 0;
	struct acpi_memory_info *info;

	if (!mem_device)
		return -EINVAL;

	pxm = xen_acpi_get_pxm(mem_device->device->handle);
	if (pxm < 0)
		return pxm;

	list_for_each_entry(info, &mem_device->res_list, list) {
		if (info->enabled) { /* just sanity check...*/
			num_enabled++;
			continue;
		}

		if (!info->length)
			continue;

		result = xen_hotadd_memory(pxm, info);
		if (result)
			continue;
		info->enabled = 1;
		num_enabled++;
	}

	if (!num_enabled)
		return -ENODEV;

	return 0;
}

/*
 * _CRS walk callback: record each 64-bit memory-range resource.  Adjacent
 * ranges with identical caching/write-protect attributes are coalesced
 * into the previous entry instead of allocating a new one.
 */
static acpi_status
acpi_memory_get_resource(struct acpi_resource *resource, void *context)
{
	struct acpi_memory_device *mem_device = context;
	struct acpi_resource_address64 address64;
	struct acpi_memory_info *info, *new;
	acpi_status status;

	status = acpi_resource_to_address64(resource, &address64);
	if (ACPI_FAILURE(status) ||
	    (address64.resource_type != ACPI_MEMORY_RANGE))
		return AE_OK;

	list_for_each_entry(info, &mem_device->res_list, list) {
		/* Can we combine the resource range information? */
		if ((info->caching == address64.info.mem.caching) &&
		    (info->write_protect == address64.info.mem.write_protect) &&
		    (info->start_addr + info->length == address64.minimum)) {
			info->length += address64.address_length;
			return AE_OK;
		}
	}

	new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
	if (!new)
		return AE_ERROR;

	INIT_LIST_HEAD(&new->list);
	new->caching = address64.info.mem.caching;
	new->write_protect = address64.info.mem.write_protect;
	new->start_addr = address64.minimum;
	new->length = address64.address_length;
	list_add_tail(&new->list, &mem_device->res_list);

	return AE_OK;
}

/*
 * Populate mem_device->res_list by walking the device's _CRS.  A no-op if
 * the list is already populated; on walk failure the partial list is
 * freed and -EINVAL returned.
 */
static int
acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
{
	acpi_status status;
	struct acpi_memory_info *info, *n;

	if (!list_empty(&mem_device->res_list))
		return 0;

	status = acpi_walk_resources(mem_device->device->handle,
		METHOD_NAME__CRS, acpi_memory_get_resource, mem_device);

	if (ACPI_FAILURE(status)) {
		list_for_each_entry_safe(info, n, &mem_device->res_list, list)
			kfree(info);
		INIT_LIST_HEAD(&mem_device->res_list);
		return -EINVAL;
	}

	return 0;
}

/*
 * Resolve 'handle' to its driver data, scanning the ACPI namespace to
 * create the acpi_device first if it does not exist yet.  Takes the ACPI
 * scan lock around the lookup/scan.
 */
static int acpi_memory_get_device(acpi_handle handle,
				  struct acpi_memory_device **mem_device)
{
	struct acpi_device *device = NULL;
	int result = 0;

	acpi_scan_lock_acquire();

	acpi_bus_get_device(handle, &device);
	if (device)
		goto end;

	/*
	 * Now add the notified device.  This creates the acpi_device
	 * and invokes .add function
	 */
	result = acpi_bus_scan(handle);
	if (result) {
		pr_warn(PREFIX "ACPI namespace scan failed\n");
		result = -EINVAL;
		goto out;
	}
	result = acpi_bus_get_device(handle, &device);
	if (result) {
		pr_warn(PREFIX "Missing device object\n");
		result = -EINVAL;
		goto out;
	}

end:
	*mem_device = acpi_driver_data(device);
	if (!(*mem_device)) {
		pr_err(PREFIX "driver data not found\n");
		result = -ENODEV;
		goto out;
	}

out:
	acpi_scan_lock_release();
	return result;
}

/*
 * Evaluate _STA and require the device to be present, enabled and
 * functioning.  Returns 0 when usable, -ENODEV otherwise.
 */
static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
{
	unsigned long long current_status;

	/* Get device present/absent information from the _STA */
	if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
				"_STA", NULL, &current_status)))
		return -ENODEV;
	/*
	 * Check for device status.  Device should be
	 * present/enabled/functioning.
	 */
	if (!((current_status & ACPI_STA_DEVICE_PRESENT)
	      && (current_status & ACPI_STA_DEVICE_ENABLED)
	      && (current_status & ACPI_STA_DEVICE_FUNCTIONING)))
		return -ENODEV;

	return 0;
}

/* Memory hot-remove is not supported under Xen; always fails. */
static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
{
	pr_debug(PREFIX "Xen does not support memory hotremove\n");
	return -ENOSYS;
}

/*
 * ACPI notify handler for memory devices: BUS/DEVICE CHECK triggers a
 * (re)scan via acpi_memory_get_device; EJECT is acknowledged but refused
 * since hot-remove is unsupported.  An OST status is reported back to
 * firmware for handled events.
 */
static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_memory_device *mem_device;
	struct acpi_device *device;
	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */

	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"\nReceived BUS CHECK notification for device\n"));
		/* Fall Through */
	case ACPI_NOTIFY_DEVICE_CHECK:
		if (event == ACPI_NOTIFY_DEVICE_CHECK)
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"\nReceived DEVICE CHECK notification for device\n"));

		if (acpi_memory_get_device(handle, &mem_device)) {
			pr_err(PREFIX "Cannot find driver data\n");
			break;
		}

		ost_code = ACPI_OST_SC_SUCCESS;
		break;

	case ACPI_NOTIFY_EJECT_REQUEST:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"\nReceived EJECT REQUEST notification for device\n"));

		acpi_scan_lock_acquire();
		if (acpi_bus_get_device(handle, &device)) {
			acpi_scan_lock_release();
			pr_err(PREFIX "Device doesn't exist\n");
			break;
		}
		mem_device = acpi_driver_data(device);
		if (!mem_device) {
			acpi_scan_lock_release();
			pr_err(PREFIX "Driver Data is NULL\n");
			break;
		}

		/*
		 * TBD: implement acpi_memory_disable_device and invoke
		 * acpi_bus_remove if Xen support hotremove in the future
		 */
		acpi_memory_disable_device(mem_device);
		acpi_scan_lock_release();
		break;

	default:
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"Unsupported event [0x%x]\n", event));
		/* non-hotplug event; possibly handled by other handler */
		return;
	}

	(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
	return;
}

/*
 * Driver .add: allocate driver data, parse _CRS into a range list, and
 * (only after initial boot enumeration is done) hot-add the ranges to
 * Xen if the device's _STA says it is usable.
 */
static int xen_acpi_memory_device_add(struct acpi_device *device)
{
	int result;
	struct acpi_memory_device *mem_device = NULL;


	if (!device)
		return -EINVAL;

	mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
	if (!mem_device)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem_device->res_list);
	mem_device->device = device;
	sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
	sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
	device->driver_data = mem_device;

	/* Get the range from the _CRS */
	result = acpi_memory_get_device_resources(mem_device);
	if (result) {
		kfree(mem_device);
		return result;
	}

	/*
	 * For booting existed memory devices, early boot code has recognized
	 * memory area by EFI/E820. If DSDT shows these memory devices on boot,
	 * hotplug is not necessary for them.
	 * For hot-added memory devices during runtime, it need hypercall to
	 * Xen hypervisor to add memory.
	 */
	if (!acpi_hotmem_initialized)
		return 0;

	if (!acpi_memory_check_device(mem_device))
		result = xen_acpi_memory_enable_device(mem_device);

	return result;
}

/* Driver .remove: free the driver data allocated in .add. */
static int xen_acpi_memory_device_remove(struct acpi_device *device)
{
	struct acpi_memory_device *mem_device = NULL;

	if (!device || !acpi_driver_data(device))
		return -EINVAL;

	mem_device = acpi_driver_data(device);
	kfree(mem_device);

	return 0;
}

/*
 * Helper function to check for memory device
 */
static acpi_status is_memory_device(acpi_handle handle)
{
	char *hardware_id;
	acpi_status status;
	struct acpi_device_info *info;

	status = acpi_get_object_info(handle, &info);
	if (ACPI_FAILURE(status))
		return status;

	if (!(info->valid & ACPI_VALID_HID)) {
		kfree(info);
		return AE_ERROR;
	}

	hardware_id = info->hardware_id.string;
	if ((hardware_id == NULL) ||
	    (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
		status = AE_ERROR;

	kfree(info);
	return status;
}

/*
 * Namespace-walk callback: install our notify handler on every memory
 * device found.  Always returns AE_OK so the walk continues; install
 * failures on individual devices are ignored.
 */
static acpi_status
acpi_memory_register_notify_handler(acpi_handle handle,
				    u32 level, void *ctxt, void **retv)
{
	acpi_status status;

	status = is_memory_device(handle);
	if (ACPI_FAILURE(status))
		return AE_OK;	/* continue */

	status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
					     acpi_memory_device_notify, NULL);
	/* continue */
	return AE_OK;
}

/* Namespace-walk callback: mirror of the above; removes the handler. */
static acpi_status
acpi_memory_deregister_notify_handler(acpi_handle handle,
				      u32 level, void *ctxt, void **retv)
{
	acpi_status status;

	status = is_memory_device(handle);
	if (ACPI_FAILURE(status))
		return AE_OK;	/* continue */

	status = acpi_remove_notify_handler(handle,
					    ACPI_SYSTEM_NOTIFY,
					    acpi_memory_device_notify);

	return AE_OK;	/* continue */
}

static const struct acpi_device_id memory_device_ids[] = {
	{ACPI_MEMORY_DEVICE_HID, 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, memory_device_ids);

static struct acpi_driver xen_acpi_memory_device_driver = {
	.name = "acpi_memhotplug",
	.class = ACPI_MEMORY_DEVICE_CLASS,
	.ids = memory_device_ids,
	.ops = {
		.add = xen_acpi_memory_device_add,
		.remove = xen_acpi_memory_device_remove,
		},
};

/*
 * Module init (dom0 only): swap out the stub driver that reserved our
 * driver slot, register the real driver, then walk the namespace to hook
 * notify handlers.  On any failure the stub is re-registered so native
 * drivers still cannot bind.
 */
static int __init xen_acpi_memory_device_init(void)
{
	int result;
	acpi_status status;

	if (!xen_initial_domain())
		return -ENODEV;

	/* unregister the stub which only used to reserve driver space */
	xen_stub_memory_device_exit();

	result = acpi_bus_register_driver(&xen_acpi_memory_device_driver);
	if (result < 0) {
		xen_stub_memory_device_init();
		return -ENODEV;
	}

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     ACPI_UINT32_MAX,
				     acpi_memory_register_notify_handler,
				     NULL, NULL, NULL);

	if (ACPI_FAILURE(status)) {
		pr_warn(PREFIX "walk_namespace failed\n");
		acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);
		xen_stub_memory_device_init();
		return -ENODEV;
	}

	acpi_hotmem_initialized = true;
	return 0;
}

/*
 * Module exit: unhook notify handlers, unregister the driver and
 * re-install the stub so the native memhotplug driver cannot load.
 */
static void __exit xen_acpi_memory_device_exit(void)
{
	acpi_status status;

	if (!xen_initial_domain())
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     ACPI_UINT32_MAX,
				     acpi_memory_deregister_notify_handler,
				     NULL, NULL, NULL);
	if (ACPI_FAILURE(status))
		pr_warn(PREFIX "walk_namespace failed\n");

	acpi_bus_unregister_driver(&xen_acpi_memory_device_driver);

	/*
	 * stub reserve space again to prevent any chance of native
	 * driver loading.
	 */
	xen_stub_memory_device_init();
	return;
}

module_init(xen_acpi_memory_device_init);
module_exit(xen_acpi_memory_device_exit);
ACPI_MODULE_NAME("xen-acpi-memhotplug");
MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>");
MODULE_DESCRIPTION("Xen Hotplug Mem Driver");
MODULE_LICENSE("GPL");
gpl-2.0
gmm001/android_kernel_zte_nx503a-1
drivers/net/ppp/pppoe.c
2907
28155
/** -*- linux-c -*- *********************************************************** * Linux PPP over Ethernet (PPPoX/PPPoE) Sockets * * PPPoX --- Generic PPP encapsulation socket family * PPPoE --- PPP over Ethernet (RFC 2516) * * * Version: 0.7.0 * * 070228 : Fix to allow multiple sessions with same remote MAC and same * session id by including the local device ifindex in the * tuple identifying a session. This also ensures packets can't * be injected into a session from interfaces other than the one * specified by userspace. Florian Zumbiehl <florz@florz.de> * (Oh, BTW, this one is YYMMDD, in case you were wondering ...) * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme * 030700 : Fixed connect logic to allow for disconnect. * 270700 : Fixed potential SMP problems; we must protect against * simultaneous invocation of ppp_input * and ppp_unregister_channel. * 040800 : Respect reference count mechanisms on net-devices. * 200800 : fix kfree(skb) in pppoe_rcv (acme) * Module reference count is decremented in the right spot now, * guards against sock_put not actually freeing the sk * in pppoe_release. * 051000 : Initialization cleanup. * 111100 : Fix recvmsg. * 050101 : Fix PADT procesing. * 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey) * 170701 : Do not lock_sock with rwlock held. (DaveM) * Ignore discovery frames if user has socket * locked. (DaveM) * Ignore return value of dev_queue_xmit in __pppoe_xmit * or else we may kfree an SKB twice. (DaveM) * 190701 : When doing copies of skb's in __pppoe_xmit, always delete * the original skb that was passed in on success, never on * failure. Delete the copy of the skb on failure to avoid * a memory leak. * 081001 : Misc. cleanup (licence string, non-blocking, prevent * reference of device on close). * 121301 : New ppp channels interface; cannot unregister a channel * from interrupts. Thus, we mark the socket as a ZOMBIE * and do the unregistration later. 
* 081002 : seq_file support for proc stuff -acme * 111602 : Merge all 2.4 fixes into 2.5/2.6 tree. Label 2.5/2.6 * as version 0.7. Spacing cleanup. * Author: Michal Ostrowski <mostrows@speakeasy.net> * Contributors: * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * David S. Miller (davem@redhat.com) * * License: * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/string.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/net.h> #include <linux/inetdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/if_ether.h> #include <linux/if_pppox.h> #include <linux/ppp_channel.h> #include <linux/ppp_defs.h> #include <linux/ppp-ioctl.h> #include <linux/notifier.h> #include <linux/file.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/nsproxy.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/sock.h> #include <asm/uaccess.h> #define PPPOE_HASH_BITS 4 #define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) #define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); static const struct proto_ops pppoe_ops; static const struct ppp_channel_ops pppoe_chan_ops; /* per-net private data for this module */ static int pppoe_net_id __read_mostly; struct pppoe_net { /* * we could use _single_ hash table for all * nets by injecting net id into the hash but * it would increase hash chains and add * a few additional math comparations messy * as well, moreover in case of SMP less locking * controversy here */ struct pppox_sock *hash_table[PPPOE_HASH_SIZE]; rwlock_t hash_lock; }; /* * PPPoE could be in the following stages: * 1) Discovery 
stage (to obtain remote MAC and Session ID) * 2) Session stage (MAC and SID are known) * * Ethernet frames have a special tag for this but * we use simpler approach based on session id */ static inline bool stage_session(__be16 sid) { return sid != 0; } static inline struct pppoe_net *pppoe_pernet(struct net *net) { BUG_ON(!net); return net_generic(net, pppoe_net_id); } static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) { return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN); } static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) { return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN); } #if 8 % PPPOE_HASH_BITS #error 8 must be a multiple of PPPOE_HASH_BITS #endif static int hash_item(__be16 sid, unsigned char *addr) { unsigned char hash = 0; unsigned int i; for (i = 0; i < ETH_ALEN; i++) hash ^= addr[i]; for (i = 0; i < sizeof(sid_t) * 8; i += 8) hash ^= (__force __u32)sid >> i; for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;) hash ^= hash >> i; return hash & PPPOE_HASH_MASK; } /********************************************************************** * * Set/get/delete/rehash items (internal versions) * **********************************************************************/ static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid, unsigned char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret; ret = pn->hash_table[hash]; while (ret) { if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) return ret; ret = ret->next; } return NULL; } static int __set_item(struct pppoe_net *pn, struct pppox_sock *po) { int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); struct pppox_sock *ret; ret = pn->hash_table[hash]; while (ret) { if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex) return -EALREADY; ret = ret->next; } po->next = pn->hash_table[hash]; pn->hash_table[hash] = po; return 0; } static struct pppox_sock 
*__delete_item(struct pppoe_net *pn, __be16 sid, char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret, **src; ret = pn->hash_table[hash]; src = &pn->hash_table[hash]; while (ret) { if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) { *src = ret->next; break; } src = &ret->next; ret = ret->next; } return ret; } /********************************************************************** * * Set/get/delete/rehash items * **********************************************************************/ static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid, unsigned char *addr, int ifindex) { struct pppox_sock *po; read_lock_bh(&pn->hash_lock); po = __get_item(pn, sid, addr, ifindex); if (po) sock_hold(sk_pppox(po)); read_unlock_bh(&pn->hash_lock); return po; } static inline struct pppox_sock *get_item_by_addr(struct net *net, struct sockaddr_pppox *sp) { struct net_device *dev; struct pppoe_net *pn; struct pppox_sock *pppox_sock = NULL; int ifindex; rcu_read_lock(); dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev); if (dev) { ifindex = dev->ifindex; pn = pppoe_pernet(net); pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); } rcu_read_unlock(); return pppox_sock; } static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid, char *addr, int ifindex) { struct pppox_sock *ret; write_lock_bh(&pn->hash_lock); ret = __delete_item(pn, sid, addr, ifindex); write_unlock_bh(&pn->hash_lock); return ret; } /*************************************************************************** * * Handler for device events. * Certain device events require that sockets be unconnected. 
* **************************************************************************/ static void pppoe_flush_dev(struct net_device *dev) { struct pppoe_net *pn; int i; pn = pppoe_pernet(dev_net(dev)); write_lock_bh(&pn->hash_lock); for (i = 0; i < PPPOE_HASH_SIZE; i++) { struct pppox_sock *po = pn->hash_table[i]; struct sock *sk; while (po) { while (po && po->pppoe_dev != dev) { po = po->next; } if (!po) break; sk = sk_pppox(po); /* We always grab the socket lock, followed by the * hash_lock, in that order. Since we should hold the * sock lock while doing any unbinding, we need to * release the lock we're holding. Hold a reference to * the sock so it doesn't disappear as we're jumping * between locks. */ sock_hold(sk); write_unlock_bh(&pn->hash_lock); lock_sock(sk); if (po->pppoe_dev == dev && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { pppox_unbind_sock(sk); sk->sk_state = PPPOX_ZOMBIE; sk->sk_state_change(sk); po->pppoe_dev = NULL; dev_put(dev); } release_sock(sk); sock_put(sk); /* Restart the process from the start of the current * hash chain. We dropped locks so the world may have * change from underneath us. */ BUG_ON(pppoe_pernet(dev_net(dev)) == NULL); write_lock_bh(&pn->hash_lock); po = pn->hash_table[i]; } } write_unlock_bh(&pn->hash_lock); } static int pppoe_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; /* Only look at sockets that are using this specific device. */ switch (event) { case NETDEV_CHANGEADDR: case NETDEV_CHANGEMTU: /* A change in mtu or address is a bad thing, requiring * LCP re-negotiation. */ case NETDEV_GOING_DOWN: case NETDEV_DOWN: /* Find every socket on this device and kill it. 
*/ pppoe_flush_dev(dev); break; default: break; } return NOTIFY_DONE; } static struct notifier_block pppoe_notifier = { .notifier_call = pppoe_device_event, }; /************************************************************************ * * Do the real work of receiving a PPPoE Session frame. * ***********************************************************************/ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) { struct pppox_sock *po = pppox_sk(sk); struct pppox_sock *relay_po; /* Backlog receive. Semantics of backlog rcv preclude any code from * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state * can't change. */ if (sk->sk_state & PPPOX_BOUND) { ppp_input(&po->chan, skb); } else if (sk->sk_state & PPPOX_RELAY) { relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay); if (relay_po == NULL) goto abort_kfree; if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0) goto abort_put; if (!__pppoe_xmit(sk_pppox(relay_po), skb)) goto abort_put; } else { if (sock_queue_rcv_skb(sk, skb)) goto abort_kfree; } return NET_RX_SUCCESS; abort_put: sock_put(sk_pppox(relay_po)); abort_kfree: kfree_skb(skb); return NET_RX_DROP; } /************************************************************************ * * Receive wrapper called in BH context. * ***********************************************************************/ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct pppoe_hdr *ph; struct pppox_sock *po; struct pppoe_net *pn; int len; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto out; if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto drop; ph = pppoe_hdr(skb); len = ntohs(ph->length); skb_pull_rcsum(skb, sizeof(*ph)); if (skb->len < len) goto drop; if (pskb_trim_rcsum(skb, len)) goto drop; pn = pppoe_pernet(dev_net(dev)); /* Note that get_item does a sock_hold(), so sk_pppox(po) * is known to be safe. 
*/ po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (!po) goto drop; return sk_receive_skb(sk_pppox(po), skb, 0); drop: kfree_skb(skb); out: return NET_RX_DROP; } /************************************************************************ * * Receive a PPPoE Discovery frame. * This is solely for detection of PADT frames * ***********************************************************************/ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct pppoe_hdr *ph; struct pppox_sock *po; struct pppoe_net *pn; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto out; if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto abort; ph = pppoe_hdr(skb); if (ph->code != PADT_CODE) goto abort; pn = pppoe_pernet(dev_net(dev)); po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (po) { struct sock *sk = sk_pppox(po); bh_lock_sock(sk); /* If the user has locked the socket, just ignore * the packet. With the way two rcv protocols hook into * one socket family type, we cannot (easily) distinguish * what kind of SKB it is during backlog rcv. */ if (sock_owned_by_user(sk) == 0) { /* We're no longer connect at the PPPOE layer, * and must wait for ppp channel to disconnect us. */ sk->sk_state = PPPOX_ZOMBIE; } bh_unlock_sock(sk); sock_put(sk); } abort: kfree_skb(skb); out: return NET_RX_SUCCESS; /* Lies... :-) */ } static struct packet_type pppoes_ptype __read_mostly = { .type = cpu_to_be16(ETH_P_PPP_SES), .func = pppoe_rcv, }; static struct packet_type pppoed_ptype __read_mostly = { .type = cpu_to_be16(ETH_P_PPP_DISC), .func = pppoe_disc_rcv, }; static struct proto pppoe_sk_proto __read_mostly = { .name = "PPPOE", .owner = THIS_MODULE, .obj_size = sizeof(struct pppox_sock), }; /*********************************************************************** * * Initialize a new struct sock. 
* **********************************************************************/ static int pppoe_create(struct net *net, struct socket *sock) { struct sock *sk; sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->state = SS_UNCONNECTED; sock->ops = &pppoe_ops; sk->sk_backlog_rcv = pppoe_rcv_core; sk->sk_state = PPPOX_NONE; sk->sk_type = SOCK_STREAM; sk->sk_family = PF_PPPOX; sk->sk_protocol = PX_PROTO_OE; return 0; } static int pppoe_release(struct socket *sock) { struct sock *sk = sock->sk; struct pppox_sock *po; struct pppoe_net *pn; struct net *net = NULL; if (!sk) return 0; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD)) { release_sock(sk); return -EBADF; } po = pppox_sk(sk); if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } pppox_unbind_sock(sk); /* Signal the death of the socket. */ sk->sk_state = PPPOX_DEAD; net = sock_net(sk); pn = pppoe_pernet(net); /* * protect "po" from concurrent updates * on pppoe_flush_dev */ delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); sock_orphan(sk); sock->sk = NULL; skb_queue_purge(&sk->sk_receive_queue); release_sock(sk); sock_put(sk); return 0; } static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; struct pppox_sock *po = pppox_sk(sk); struct net_device *dev = NULL; struct pppoe_net *pn; struct net *net = NULL; int error; lock_sock(sk); error = -EINVAL; if (sp->sa_protocol != PX_PROTO_OE) goto end; /* Check for already bound sockets */ error = -EBUSY; if ((sk->sk_state & PPPOX_CONNECTED) && stage_session(sp->sa_addr.pppoe.sid)) goto end; /* Check for already disconnected sockets, on attempts to disconnect */ error = -EALREADY; if ((sk->sk_state & PPPOX_DEAD) && !stage_session(sp->sa_addr.pppoe.sid)) goto end; error = 0; /* Delete the old binding */ if 
(stage_session(po->pppoe_pa.sid)) { pppox_unbind_sock(sk); pn = pppoe_pernet(sock_net(sk)); delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } memset(sk_pppox(po) + 1, 0, sizeof(struct pppox_sock) - sizeof(struct sock)); sk->sk_state = PPPOX_NONE; } /* Re-bind in session stage only */ if (stage_session(sp->sa_addr.pppoe.sid)) { error = -ENODEV; net = sock_net(sk); dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev); if (!dev) goto err_put; po->pppoe_dev = dev; po->pppoe_ifindex = dev->ifindex; pn = pppoe_pernet(net); if (!(dev->flags & IFF_UP)) { goto err_put; } memcpy(&po->pppoe_pa, &sp->sa_addr.pppoe, sizeof(struct pppoe_addr)); write_lock_bh(&pn->hash_lock); error = __set_item(pn, po); write_unlock_bh(&pn->hash_lock); if (error < 0) goto err_put; po->chan.hdrlen = (sizeof(struct pppoe_hdr) + dev->hard_header_len); po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr); po->chan.private = sk; po->chan.ops = &pppoe_chan_ops; error = ppp_register_net_channel(dev_net(dev), &po->chan); if (error) { delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); goto err_put; } sk->sk_state = PPPOX_CONNECTED; } po->num = sp->sa_addr.pppoe.sid; end: release_sock(sk); return error; err_put: if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } goto end; } static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, int *usockaddr_len, int peer) { int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_OE; memcpy(&sp.sa_addr.pppoe, &pppox_sk(sock->sk)->pppoe_pa, sizeof(struct pppoe_addr)); memcpy(uaddr, &sp, len); *usockaddr_len = len; return 0; } static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); int val; int err; switch (cmd) { case PPPIOCGMRU: err = -ENXIO; if (!(sk->sk_state & 
PPPOX_CONNECTED)) break; err = -EFAULT; if (put_user(po->pppoe_dev->mtu - sizeof(struct pppoe_hdr) - PPP_HDRLEN, (int __user *)arg)) break; err = 0; break; case PPPIOCSMRU: err = -ENXIO; if (!(sk->sk_state & PPPOX_CONNECTED)) break; err = -EFAULT; if (get_user(val, (int __user *)arg)) break; if (val < (po->pppoe_dev->mtu - sizeof(struct pppoe_hdr) - PPP_HDRLEN)) err = 0; else err = -EINVAL; break; case PPPIOCSFLAGS: err = -EFAULT; if (get_user(val, (int __user *)arg)) break; err = 0; break; case PPPOEIOCSFWD: { struct pppox_sock *relay_po; err = -EBUSY; if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE | PPPOX_DEAD)) break; err = -ENOTCONN; if (!(sk->sk_state & PPPOX_CONNECTED)) break; /* PPPoE address from the user specifies an outbound PPPoE address which frames are forwarded to */ err = -EFAULT; if (copy_from_user(&po->pppoe_relay, (void __user *)arg, sizeof(struct sockaddr_pppox))) break; err = -EINVAL; if (po->pppoe_relay.sa_family != AF_PPPOX || po->pppoe_relay.sa_protocol != PX_PROTO_OE) break; /* Check that the socket referenced by the address actually exists. 
*/ relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay); if (!relay_po) break; sock_put(sk_pppox(relay_po)); sk->sk_state |= PPPOX_RELAY; err = 0; break; } case PPPOEIOCDFWD: err = -EALREADY; if (!(sk->sk_state & PPPOX_RELAY)) break; sk->sk_state &= ~PPPOX_RELAY; err = 0; break; default: err = -ENOTTY; } return err; } static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { struct sk_buff *skb; struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); int error; struct pppoe_hdr hdr; struct pppoe_hdr *ph; struct net_device *dev; char *start; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { error = -ENOTCONN; goto end; } hdr.ver = 1; hdr.type = 1; hdr.code = 0; hdr.sid = po->num; dev = po->pppoe_dev; error = -EMSGSIZE; if (total_len > (dev->mtu + dev->hard_header_len)) goto end; skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, 0, GFP_KERNEL); if (!skb) { error = -ENOMEM; goto end; } /* Reserve space for headers. */ skb_reserve(skb, dev->hard_header_len); skb_reset_network_header(skb); skb->dev = dev; skb->priority = sk->sk_priority; skb->protocol = cpu_to_be16(ETH_P_PPP_SES); ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr)); start = (char *)&ph->tag[0]; error = memcpy_fromiovec(start, m->msg_iov, total_len); if (error < 0) { kfree_skb(skb); goto end; } error = total_len; dev_hard_header(skb, dev, ETH_P_PPP_SES, po->pppoe_pa.remote, NULL, total_len); memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); ph->length = htons(total_len); dev_queue_xmit(skb); end: release_sock(sk); return error; } /************************************************************************ * * xmit function for internal use. 
* ***********************************************************************/ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) { struct pppox_sock *po = pppox_sk(sk); struct net_device *dev = po->pppoe_dev; struct pppoe_hdr *ph; int data_len = skb->len; /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP * xmit operations conclude prior to an unregistration call. Thus * sk->sk_state cannot change, so we don't need to do lock_sock(). * But, we also can't do a lock_sock since that introduces a potential * deadlock as we'd reverse the lock ordering used when calling * ppp_unregister_channel(). */ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto abort; if (!dev) goto abort; /* Copy the data if there is no space for the header or if it's * read-only. */ if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) goto abort; __skb_push(skb, sizeof(*ph)); skb_reset_network_header(skb); ph = pppoe_hdr(skb); ph->ver = 1; ph->type = 1; ph->code = 0; ph->sid = po->num; ph->length = htons(data_len); skb->protocol = cpu_to_be16(ETH_P_PPP_SES); skb->dev = dev; dev_hard_header(skb, dev, ETH_P_PPP_SES, po->pppoe_pa.remote, NULL, data_len); dev_queue_xmit(skb); return 1; abort: kfree_skb(skb); return 1; } /************************************************************************ * * xmit function called by generic PPP driver * sends PPP frame over PPPoE socket * ***********************************************************************/ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) { struct sock *sk = (struct sock *)chan->private; return __pppoe_xmit(sk, skb); } static const struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, }; static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; if (sk->sk_state & PPPOX_BOUND) { error = -EIO; goto end; } skb = skb_recv_datagram(sk, flags 
& ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &error); if (error < 0) goto end; m->msg_namelen = 0; if (skb) { total_len = min_t(size_t, total_len, skb->len); error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); if (error == 0) error = total_len; } kfree_skb(skb); end: return error; } #ifdef CONFIG_PROC_FS static int pppoe_seq_show(struct seq_file *seq, void *v) { struct pppox_sock *po; char *dev_name; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Id Address Device\n"); goto out; } po = v; dev_name = po->pppoe_pa.dev; seq_printf(seq, "%08X %pM %8s\n", po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name); out: return 0; } static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos) { struct pppox_sock *po; int i; for (i = 0; i < PPPOE_HASH_SIZE; i++) { po = pn->hash_table[i]; while (po) { if (!pos--) goto out; po = po->next; } } out: return po; } static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos) __acquires(pn->hash_lock) { struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); loff_t l = *pos; read_lock_bh(&pn->hash_lock); return l ? 
pppoe_get_idx(pn, --l) : SEQ_START_TOKEN; } static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); struct pppox_sock *po; ++*pos; if (v == SEQ_START_TOKEN) { po = pppoe_get_idx(pn, 0); goto out; } po = v; if (po->next) po = po->next; else { int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); po = NULL; while (++hash < PPPOE_HASH_SIZE) { po = pn->hash_table[hash]; if (po) break; } } out: return po; } static void pppoe_seq_stop(struct seq_file *seq, void *v) __releases(pn->hash_lock) { struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); read_unlock_bh(&pn->hash_lock); } static const struct seq_operations pppoe_seq_ops = { .start = pppoe_seq_start, .next = pppoe_seq_next, .stop = pppoe_seq_stop, .show = pppoe_seq_show, }; static int pppoe_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &pppoe_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations pppoe_seq_fops = { .owner = THIS_MODULE, .open = pppoe_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif /* CONFIG_PROC_FS */ static const struct proto_ops pppoe_ops = { .family = AF_PPPOX, .owner = THIS_MODULE, .release = pppoe_release, .bind = sock_no_bind, .connect = pppoe_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pppoe_getname, .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = pppoe_sendmsg, .recvmsg = pppoe_recvmsg, .mmap = sock_no_mmap, .ioctl = pppox_ioctl, }; static const struct pppox_proto pppoe_proto = { .create = pppoe_create, .ioctl = pppoe_ioctl, .owner = THIS_MODULE, }; static __net_init int pppoe_init_net(struct net *net) { struct pppoe_net *pn = pppoe_pernet(net); struct proc_dir_entry *pde; rwlock_init(&pn->hash_lock); pde = proc_net_fops_create(net, "pppoe", S_IRUGO, 
&pppoe_seq_fops); #ifdef CONFIG_PROC_FS if (!pde) return -ENOMEM; #endif return 0; } static __net_exit void pppoe_exit_net(struct net *net) { proc_net_remove(net, "pppoe"); } static struct pernet_operations pppoe_net_ops = { .init = pppoe_init_net, .exit = pppoe_exit_net, .id = &pppoe_net_id, .size = sizeof(struct pppoe_net), }; static int __init pppoe_init(void) { int err; err = register_pernet_device(&pppoe_net_ops); if (err) goto out; err = proto_register(&pppoe_sk_proto, 0); if (err) goto out_unregister_net_ops; err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto); if (err) goto out_unregister_pppoe_proto; dev_add_pack(&pppoes_ptype); dev_add_pack(&pppoed_ptype); register_netdevice_notifier(&pppoe_notifier); return 0; out_unregister_pppoe_proto: proto_unregister(&pppoe_sk_proto); out_unregister_net_ops: unregister_pernet_device(&pppoe_net_ops); out: return err; } static void __exit pppoe_exit(void) { unregister_netdevice_notifier(&pppoe_notifier); dev_remove_pack(&pppoed_ptype); dev_remove_pack(&pppoes_ptype); unregister_pppox_proto(PX_PROTO_OE); proto_unregister(&pppoe_sk_proto); unregister_pernet_device(&pppoe_net_ops); } module_init(pppoe_init); module_exit(pppoe_exit); MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>"); MODULE_DESCRIPTION("PPP over Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_PPPOX);
gpl-2.0
MattCrystal/Tempest
drivers/usb/host/uhci-hcd.c
3419
25657
/* * Universal Host Controller Interface driver for USB. * * Maintainer: Alan Stern <stern@rowland.harvard.edu> * * (C) Copyright 1999 Linus Torvalds * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com * (C) Copyright 1999 Randy Dunlap * (C) Copyright 1999 Georg Acher, acher@in.tum.de * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu * * Intel documents this fairly well, and as far as I know there * are no royalties or anything like that, but even so there are * people who decided that they want to do the same thing in a * completely different way. * */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/debugfs.h> #include <linux/pm.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/bitops.h> #include <linux/dmi.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/irq.h> #include "uhci-hcd.h" /* * Version Information */ #define DRIVER_AUTHOR \ "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, " \ "Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, " \ "Roman Weissgaerber, Alan Stern" #define DRIVER_DESC "USB Universal Host Controller Interface driver" /* for flakey hardware, ignore overcurrent indicators */ static bool ignore_oc; module_param(ignore_oc, bool, S_IRUGO); MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications"); /* * debug = 0, 
no debugging messages * debug = 1, dump failed URBs except for stalls * debug = 2, dump all failed URBs (including stalls) * show all queues in /sys/kernel/debug/uhci/[pci_addr] * debug = 3, show all TDs in URBs when dumping */ #ifdef DEBUG #define DEBUG_CONFIGURED 1 static int debug = 1; module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug level"); #else #define DEBUG_CONFIGURED 0 #define debug 0 #endif static char *errbuf; #define ERRBUF_LEN (32 * 1024) static struct kmem_cache *uhci_up_cachep; /* urb_priv */ static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state); static void wakeup_rh(struct uhci_hcd *uhci); static void uhci_get_current_frame_number(struct uhci_hcd *uhci); /* * Calculate the link pointer DMA value for the first Skeleton QH in a frame. */ static __hc32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame) { int skelnum; /* * The interrupt queues will be interleaved as evenly as possible. * There's not much to be done about period-1 interrupts; they have * to occur in every frame. But we can schedule period-2 interrupts * in odd-numbered frames, period-4 interrupts in frames congruent * to 2 (mod 4), and so on. This way each frame only has two * interrupt QHs, which will help spread out bandwidth utilization. * * ffs (Find First bit Set) does exactly what we need: * 1,3,5,... => ffs = 0 => use period-2 QH = skelqh[8], * 2,6,10,... => ffs = 1 => use period-4 QH = skelqh[7], etc. * ffs >= 7 => not on any high-period queue, so use * period-1 QH = skelqh[9]. * Add in UHCI_NUMFRAMES to insure at least one bit is set. */ skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES); if (skelnum <= 1) skelnum = 9; return LINK_TO_QH(uhci, uhci->skelqh[skelnum]); } #include "uhci-debug.c" #include "uhci-q.c" #include "uhci-hub.c" /* * Finish up a host controller reset and update the recorded state. 
*/ static void finish_reset(struct uhci_hcd *uhci) { int port; /* HCRESET doesn't affect the Suspend, Reset, and Resume Detect * bits in the port status and control registers. * We have to clear them by hand. */ for (port = 0; port < uhci->rh_numports; ++port) uhci_writew(uhci, 0, USBPORTSC1 + (port * 2)); uhci->port_c_suspend = uhci->resuming_ports = 0; uhci->rh_state = UHCI_RH_RESET; uhci->is_stopped = UHCI_IS_STOPPED; clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags); } /* * Last rites for a defunct/nonfunctional controller * or one we don't want to use any more. */ static void uhci_hc_died(struct uhci_hcd *uhci) { uhci_get_current_frame_number(uhci); uhci->reset_hc(uhci); finish_reset(uhci); uhci->dead = 1; /* The current frame may already be partway finished */ ++uhci->frame_number; } /* * Initialize a controller that was newly discovered or has lost power * or otherwise been reset while it was suspended. In none of these cases * can we be sure of its previous state. */ static void check_and_reset_hc(struct uhci_hcd *uhci) { if (uhci->check_and_reset_hc(uhci)) finish_reset(uhci); } #if defined(CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC) /* * The two functions below are generic reset functions that are used on systems * that do not have keyboard and mouse legacy support. We assume that we are * running on such a system if CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC is defined. */ /* * Make sure the controller is completely inactive, unable to * generate interrupts or do DMA. */ static void uhci_generic_reset_hc(struct uhci_hcd *uhci) { /* Reset the HC - this will force us to get a * new notification of any already connected * ports due to the virtual disconnect that it * implies. */ uhci_writew(uhci, USBCMD_HCRESET, USBCMD); mb(); udelay(5); if (uhci_readw(uhci, USBCMD) & USBCMD_HCRESET) dev_warn(uhci_dev(uhci), "HCRESET not completed yet!\n"); /* Just to be safe, disable interrupt requests and * make sure the controller is stopped. 
*/ uhci_writew(uhci, 0, USBINTR); uhci_writew(uhci, 0, USBCMD); } /* * Initialize a controller that was newly discovered or has just been * resumed. In either case we can't be sure of its previous state. * * Returns: 1 if the controller was reset, 0 otherwise. */ static int uhci_generic_check_and_reset_hc(struct uhci_hcd *uhci) { unsigned int cmd, intr; /* * When restarting a suspended controller, we expect all the * settings to be the same as we left them: * * Controller is stopped and configured with EGSM set; * No interrupts enabled except possibly Resume Detect. * * If any of these conditions are violated we do a complete reset. */ cmd = uhci_readw(uhci, USBCMD); if ((cmd & USBCMD_RS) || !(cmd & USBCMD_CF) || !(cmd & USBCMD_EGSM)) { dev_dbg(uhci_dev(uhci), "%s: cmd = 0x%04x\n", __func__, cmd); goto reset_needed; } intr = uhci_readw(uhci, USBINTR); if (intr & (~USBINTR_RESUME)) { dev_dbg(uhci_dev(uhci), "%s: intr = 0x%04x\n", __func__, intr); goto reset_needed; } return 0; reset_needed: dev_dbg(uhci_dev(uhci), "Performing full reset\n"); uhci_generic_reset_hc(uhci); return 1; } #endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */ /* * Store the basic register settings needed by the controller. */ static void configure_hc(struct uhci_hcd *uhci) { /* Set the frame length to the default: 1 ms exactly */ uhci_writeb(uhci, USBSOF_DEFAULT, USBSOF); /* Store the frame list base address */ uhci_writel(uhci, uhci->frame_dma_handle, USBFLBASEADD); /* Set the current frame number */ uhci_writew(uhci, uhci->frame_number & UHCI_MAX_SOF_NUMBER, USBFRNUM); /* perform any arch/bus specific configuration */ if (uhci->configure_hc) uhci->configure_hc(uhci); } static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci) { /* If we have to ignore overcurrent events then almost by definition * we can't depend on resume-detect interrupts. */ if (ignore_oc) return 1; return uhci->resume_detect_interrupts_are_broken ? 
uhci->resume_detect_interrupts_are_broken(uhci) : 0; } static int global_suspend_mode_is_broken(struct uhci_hcd *uhci) { return uhci->global_suspend_mode_is_broken ? uhci->global_suspend_mode_is_broken(uhci) : 0; } static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state) __releases(uhci->lock) __acquires(uhci->lock) { int auto_stop; int int_enable, egsm_enable, wakeup_enable; struct usb_device *rhdev = uhci_to_hcd(uhci)->self.root_hub; auto_stop = (new_state == UHCI_RH_AUTO_STOPPED); dev_dbg(&rhdev->dev, "%s%s\n", __func__, (auto_stop ? " (auto-stop)" : "")); /* Start off by assuming Resume-Detect interrupts and EGSM work * and that remote wakeups should be enabled. */ egsm_enable = USBCMD_EGSM; int_enable = USBINTR_RESUME; wakeup_enable = 1; /* * In auto-stop mode, we must be able to detect new connections. * The user can force us to poll by disabling remote wakeup; * otherwise we will use the EGSM/RD mechanism. */ if (auto_stop) { if (!device_may_wakeup(&rhdev->dev)) egsm_enable = int_enable = 0; } #ifdef CONFIG_PM /* * In bus-suspend mode, we use the wakeup setting specified * for the root hub. */ else { if (!rhdev->do_remote_wakeup) wakeup_enable = 0; } #endif /* * UHCI doesn't distinguish between wakeup requests from downstream * devices and local connect/disconnect events. There's no way to * enable one without the other; both are controlled by EGSM. Thus * if wakeups are disallowed then EGSM must be turned off -- in which * case remote wakeup requests from downstream during system sleep * will be lost. * * In addition, if EGSM is broken then we can't use it. Likewise, * if Resume-Detect interrupts are broken then we can't use them. * * Finally, neither EGSM nor RD is useful by itself. Without EGSM, * the RD status bit will never get set. Without RD, the controller * won't generate interrupts to tell the system about wakeup events. 
*/ if (!wakeup_enable || global_suspend_mode_is_broken(uhci) || resume_detect_interrupts_are_broken(uhci)) egsm_enable = int_enable = 0; uhci->RD_enable = !!int_enable; uhci_writew(uhci, int_enable, USBINTR); uhci_writew(uhci, egsm_enable | USBCMD_CF, USBCMD); mb(); udelay(5); /* If we're auto-stopping then no devices have been attached * for a while, so there shouldn't be any active URBs and the * controller should stop after a few microseconds. Otherwise * we will give the controller one frame to stop. */ if (!auto_stop && !(uhci_readw(uhci, USBSTS) & USBSTS_HCH)) { uhci->rh_state = UHCI_RH_SUSPENDING; spin_unlock_irq(&uhci->lock); msleep(1); spin_lock_irq(&uhci->lock); if (uhci->dead) return; } if (!(uhci_readw(uhci, USBSTS) & USBSTS_HCH)) dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n"); uhci_get_current_frame_number(uhci); uhci->rh_state = new_state; uhci->is_stopped = UHCI_IS_STOPPED; /* * If remote wakeup is enabled but either EGSM or RD interrupts * doesn't work, then we won't get an interrupt when a wakeup event * occurs. Thus the suspended root hub needs to be polled. */ if (wakeup_enable && (!int_enable || !egsm_enable)) set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags); else clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags); uhci_scan_schedule(uhci); uhci_fsbr_off(uhci); } static void start_rh(struct uhci_hcd *uhci) { uhci->is_stopped = 0; /* Mark it configured and running with a 64-byte max packet. * All interrupts are enabled, even though RESUME won't do anything. */ uhci_writew(uhci, USBCMD_RS | USBCMD_CF | USBCMD_MAXP, USBCMD); uhci_writew(uhci, USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP, USBINTR); mb(); uhci->rh_state = UHCI_RH_RUNNING; set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags); } static void wakeup_rh(struct uhci_hcd *uhci) __releases(uhci->lock) __acquires(uhci->lock) { dev_dbg(&uhci_to_hcd(uhci)->self.root_hub->dev, "%s%s\n", __func__, uhci->rh_state == UHCI_RH_AUTO_STOPPED ? 
" (auto-start)" : ""); /* If we are auto-stopped then no devices are attached so there's * no need for wakeup signals. Otherwise we send Global Resume * for 20 ms. */ if (uhci->rh_state == UHCI_RH_SUSPENDED) { unsigned egsm; /* Keep EGSM on if it was set before */ egsm = uhci_readw(uhci, USBCMD) & USBCMD_EGSM; uhci->rh_state = UHCI_RH_RESUMING; uhci_writew(uhci, USBCMD_FGR | USBCMD_CF | egsm, USBCMD); spin_unlock_irq(&uhci->lock); msleep(20); spin_lock_irq(&uhci->lock); if (uhci->dead) return; /* End Global Resume and wait for EOP to be sent */ uhci_writew(uhci, USBCMD_CF, USBCMD); mb(); udelay(4); if (uhci_readw(uhci, USBCMD) & USBCMD_FGR) dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n"); } start_rh(uhci); /* Restart root hub polling */ mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies); } static irqreturn_t uhci_irq(struct usb_hcd *hcd) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); unsigned short status; /* * Read the interrupt status, and write it back to clear the * interrupt cause. Contrary to the UHCI specification, the * "HC Halted" status bit is persistent: it is RO, not R/WC. 
*/ status = uhci_readw(uhci, USBSTS); if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */ return IRQ_NONE; uhci_writew(uhci, status, USBSTS); /* Clear it */ if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) { if (status & USBSTS_HSE) dev_err(uhci_dev(uhci), "host system error, " "PCI problems?\n"); if (status & USBSTS_HCPE) dev_err(uhci_dev(uhci), "host controller process " "error, something bad happened!\n"); if (status & USBSTS_HCH) { spin_lock(&uhci->lock); if (uhci->rh_state >= UHCI_RH_RUNNING) { dev_err(uhci_dev(uhci), "host controller halted, " "very bad!\n"); if (debug > 1 && errbuf) { /* Print the schedule for debugging */ uhci_sprint_schedule(uhci, errbuf, ERRBUF_LEN); lprintk(errbuf); } uhci_hc_died(uhci); usb_hc_died(hcd); /* Force a callback in case there are * pending unlinks */ mod_timer(&hcd->rh_timer, jiffies); } spin_unlock(&uhci->lock); } } if (status & USBSTS_RD) usb_hcd_poll_rh_status(hcd); else { spin_lock(&uhci->lock); uhci_scan_schedule(uhci); spin_unlock(&uhci->lock); } return IRQ_HANDLED; } /* * Store the current frame number in uhci->frame_number if the controller * is running. Expand from 11 bits (of which we use only 10) to a * full-sized integer. * * Like many other parts of the driver, this code relies on being polled * more than once per second as long as the controller is running. 
*/ static void uhci_get_current_frame_number(struct uhci_hcd *uhci) { if (!uhci->is_stopped) { unsigned delta; delta = (uhci_readw(uhci, USBFRNUM) - uhci->frame_number) & (UHCI_NUMFRAMES - 1); uhci->frame_number += delta; } } /* * De-allocate all resources */ static void release_uhci(struct uhci_hcd *uhci) { int i; if (DEBUG_CONFIGURED) { spin_lock_irq(&uhci->lock); uhci->is_initialized = 0; spin_unlock_irq(&uhci->lock); debugfs_remove(uhci->dentry); } for (i = 0; i < UHCI_NUM_SKELQH; i++) uhci_free_qh(uhci, uhci->skelqh[i]); uhci_free_td(uhci, uhci->term_td); dma_pool_destroy(uhci->qh_pool); dma_pool_destroy(uhci->td_pool); kfree(uhci->frame_cpu); dma_free_coherent(uhci_dev(uhci), UHCI_NUMFRAMES * sizeof(*uhci->frame), uhci->frame, uhci->frame_dma_handle); } /* * Allocate a frame list, and then setup the skeleton * * The hardware doesn't really know any difference * in the queues, but the order does matter for the * protocols higher up. The order in which the queues * are encountered by the hardware is: * * - All isochronous events are handled before any * of the queues. We don't do that here, because * we'll create the actual TD entries on demand. * - The first queue is the high-period interrupt queue. * - The second queue is the period-1 interrupt and async * (low-speed control, full-speed control, then bulk) queue. * - The third queue is the terminating bandwidth reclamation queue, * which contains no members, loops back to itself, and is present * only when FSBR is on and there are no full-speed control or bulk QHs. 
*/ static int uhci_start(struct usb_hcd *hcd) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); int retval = -EBUSY; int i; struct dentry __maybe_unused *dentry; hcd->uses_new_polling = 1; /* Accept arbitrarily long scatter-gather lists */ if (!(hcd->driver->flags & HCD_LOCAL_MEM)) hcd->self.sg_tablesize = ~0; spin_lock_init(&uhci->lock); setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout, (unsigned long) uhci); INIT_LIST_HEAD(&uhci->idle_qh_list); init_waitqueue_head(&uhci->waitqh); #ifdef UHCI_DEBUG_OPS dentry = debugfs_create_file(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root, uhci, &uhci_debug_operations); if (!dentry) { dev_err(uhci_dev(uhci), "couldn't create uhci debugfs entry\n"); return -ENOMEM; } uhci->dentry = dentry; #endif uhci->frame = dma_alloc_coherent(uhci_dev(uhci), UHCI_NUMFRAMES * sizeof(*uhci->frame), &uhci->frame_dma_handle, 0); if (!uhci->frame) { dev_err(uhci_dev(uhci), "unable to allocate " "consistent memory for frame list\n"); goto err_alloc_frame; } memset(uhci->frame, 0, UHCI_NUMFRAMES * sizeof(*uhci->frame)); uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu), GFP_KERNEL); if (!uhci->frame_cpu) { dev_err(uhci_dev(uhci), "unable to allocate " "memory for frame pointers\n"); goto err_alloc_frame_cpu; } uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci), sizeof(struct uhci_td), 16, 0); if (!uhci->td_pool) { dev_err(uhci_dev(uhci), "unable to create td dma_pool\n"); goto err_create_td_pool; } uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci), sizeof(struct uhci_qh), 16, 0); if (!uhci->qh_pool) { dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n"); goto err_create_qh_pool; } uhci->term_td = uhci_alloc_td(uhci); if (!uhci->term_td) { dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n"); goto err_alloc_term_td; } for (i = 0; i < UHCI_NUM_SKELQH; i++) { uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL); if (!uhci->skelqh[i]) { dev_err(uhci_dev(uhci), "unable to allocate QH\n"); goto 
err_alloc_skelqh; } } /* * 8 Interrupt queues; link all higher int queues to int1 = async */ for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i) uhci->skelqh[i]->link = LINK_TO_QH(uhci, uhci->skel_async_qh); uhci->skel_async_qh->link = UHCI_PTR_TERM(uhci); uhci->skel_term_qh->link = LINK_TO_QH(uhci, uhci->skel_term_qh); /* This dummy TD is to work around a bug in Intel PIIX controllers */ uhci_fill_td(uhci, uhci->term_td, 0, uhci_explen(0) | (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0); uhci->term_td->link = UHCI_PTR_TERM(uhci); uhci->skel_async_qh->element = uhci->skel_term_qh->element = LINK_TO_TD(uhci, uhci->term_td); /* * Fill the frame list: make all entries point to the proper * interrupt queue. */ for (i = 0; i < UHCI_NUMFRAMES; i++) { /* Only place we don't use the frame list routines */ uhci->frame[i] = uhci_frame_skel_link(uhci, i); } /* * Some architectures require a full mb() to enforce completion of * the memory writes above before the I/O transfers in configure_hc(). */ mb(); configure_hc(uhci); uhci->is_initialized = 1; spin_lock_irq(&uhci->lock); start_rh(uhci); spin_unlock_irq(&uhci->lock); return 0; /* * error exits: */ err_alloc_skelqh: for (i = 0; i < UHCI_NUM_SKELQH; i++) { if (uhci->skelqh[i]) uhci_free_qh(uhci, uhci->skelqh[i]); } uhci_free_td(uhci, uhci->term_td); err_alloc_term_td: dma_pool_destroy(uhci->qh_pool); err_create_qh_pool: dma_pool_destroy(uhci->td_pool); err_create_td_pool: kfree(uhci->frame_cpu); err_alloc_frame_cpu: dma_free_coherent(uhci_dev(uhci), UHCI_NUMFRAMES * sizeof(*uhci->frame), uhci->frame, uhci->frame_dma_handle); err_alloc_frame: debugfs_remove(uhci->dentry); return retval; } static void uhci_stop(struct usb_hcd *hcd) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); spin_lock_irq(&uhci->lock); if (HCD_HW_ACCESSIBLE(hcd) && !uhci->dead) uhci_hc_died(uhci); uhci_scan_schedule(uhci); spin_unlock_irq(&uhci->lock); synchronize_irq(hcd->irq); del_timer_sync(&uhci->fsbr_timer); release_uhci(uhci); } #ifdef CONFIG_PM static int 
uhci_rh_suspend(struct usb_hcd *hcd) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); int rc = 0; spin_lock_irq(&uhci->lock); if (!HCD_HW_ACCESSIBLE(hcd)) rc = -ESHUTDOWN; else if (uhci->dead) ; /* Dead controllers tell no tales */ /* Once the controller is stopped, port resumes that are already * in progress won't complete. Hence if remote wakeup is enabled * for the root hub and any ports are in the middle of a resume or * remote wakeup, we must fail the suspend. */ else if (hcd->self.root_hub->do_remote_wakeup && uhci->resuming_ports) { dev_dbg(uhci_dev(uhci), "suspend failed because a port " "is resuming\n"); rc = -EBUSY; } else suspend_rh(uhci, UHCI_RH_SUSPENDED); spin_unlock_irq(&uhci->lock); return rc; } static int uhci_rh_resume(struct usb_hcd *hcd) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); int rc = 0; spin_lock_irq(&uhci->lock); if (!HCD_HW_ACCESSIBLE(hcd)) rc = -ESHUTDOWN; else if (!uhci->dead) wakeup_rh(uhci); spin_unlock_irq(&uhci->lock); return rc; } #endif /* Wait until a particular device/endpoint's QH is idle, and free it */ static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); struct uhci_qh *qh; spin_lock_irq(&uhci->lock); qh = (struct uhci_qh *) hep->hcpriv; if (qh == NULL) goto done; while (qh->state != QH_STATE_IDLE) { ++uhci->num_waiting; spin_unlock_irq(&uhci->lock); wait_event_interruptible(uhci->waitqh, qh->state == QH_STATE_IDLE); spin_lock_irq(&uhci->lock); --uhci->num_waiting; } uhci_free_qh(uhci, qh); done: spin_unlock_irq(&uhci->lock); } static int uhci_hcd_get_frame_number(struct usb_hcd *hcd) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); unsigned frame_number; unsigned delta; /* Minimize latency by avoiding the spinlock */ frame_number = uhci->frame_number; barrier(); delta = (uhci_readw(uhci, USBFRNUM) - frame_number) & (UHCI_NUMFRAMES - 1); return frame_number + delta; } /* Determines number of ports on controller */ static int uhci_count_ports(struct 
usb_hcd *hcd) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); unsigned io_size = (unsigned) hcd->rsrc_len; int port; /* The UHCI spec says devices must have 2 ports, and goes on to say * they may have more but gives no way to determine how many there * are. However according to the UHCI spec, Bit 7 of the port * status and control register is always set to 1. So we try to * use this to our advantage. Another common failure mode when * a nonexistent register is addressed is to return all ones, so * we test for that also. */ for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) { unsigned int portstatus; portstatus = uhci_readw(uhci, USBPORTSC1 + (port * 2)); if (!(portstatus & 0x0080) || portstatus == 0xffff) break; } if (debug) dev_info(uhci_dev(uhci), "detected %d ports\n", port); /* Anything greater than 7 is weird so we'll ignore it. */ if (port > UHCI_RH_MAXCHILD) { dev_info(uhci_dev(uhci), "port count misdetected? " "forcing to 2 ports\n"); port = 2; } return port; } static const char hcd_name[] = "uhci_hcd"; #ifdef CONFIG_PCI #include "uhci-pci.c" #define PCI_DRIVER uhci_pci_driver #endif #ifdef CONFIG_SPARC_LEON #include "uhci-grlib.c" #define PLATFORM_DRIVER uhci_grlib_driver #endif #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) #error "missing bus glue for uhci-hcd" #endif static int __init uhci_hcd_init(void) { int retval = -ENOMEM; if (usb_disabled()) return -ENODEV; printk(KERN_INFO "uhci_hcd: " DRIVER_DESC "%s\n", ignore_oc ? 
", overcurrent ignored" : ""); set_bit(USB_UHCI_LOADED, &usb_hcds_loaded); if (DEBUG_CONFIGURED) { errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL); if (!errbuf) goto errbuf_failed; uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root); if (!uhci_debugfs_root) goto debug_failed; } uhci_up_cachep = kmem_cache_create("uhci_urb_priv", sizeof(struct urb_priv), 0, 0, NULL); if (!uhci_up_cachep) goto up_failed; #ifdef PLATFORM_DRIVER retval = platform_driver_register(&PLATFORM_DRIVER); if (retval < 0) goto clean0; #endif #ifdef PCI_DRIVER retval = pci_register_driver(&PCI_DRIVER); if (retval < 0) goto clean1; #endif return 0; #ifdef PCI_DRIVER clean1: #endif #ifdef PLATFORM_DRIVER platform_driver_unregister(&PLATFORM_DRIVER); clean0: #endif kmem_cache_destroy(uhci_up_cachep); up_failed: debugfs_remove(uhci_debugfs_root); debug_failed: kfree(errbuf); errbuf_failed: clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded); return retval; } static void __exit uhci_hcd_cleanup(void) { #ifdef PLATFORM_DRIVER platform_driver_unregister(&PLATFORM_DRIVER); #endif #ifdef PCI_DRIVER pci_unregister_driver(&PCI_DRIVER); #endif kmem_cache_destroy(uhci_up_cachep); debugfs_remove(uhci_debugfs_root); kfree(errbuf); clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded); } module_init(uhci_hcd_init); module_exit(uhci_hcd_cleanup); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
Shabbypenguin/Jellybean_kernel
drivers/media/dvb/ttusb-dec/ttusbdecfe.c
4955
7875
/* * TTUSB DEC Frontend Driver * * Copyright (C) 2003-2004 Alex Woods <linux-dvb@giblets.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "dvb_frontend.h" #include "ttusbdecfe.h" #define LOF_HI 10600000 #define LOF_LO 9750000 struct ttusbdecfe_state { /* configuration settings */ const struct ttusbdecfe_config* config; struct dvb_frontend frontend; u8 hi_band; u8 voltage; }; static int ttusbdecfe_dvbs_read_status(struct dvb_frontend *fe, fe_status_t *status) { *status = FE_HAS_SIGNAL | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_CARRIER | FE_HAS_LOCK; return 0; } static int ttusbdecfe_dvbt_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct ttusbdecfe_state* state = fe->demodulator_priv; u8 b[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; u8 result[4]; int len, ret; *status=0; ret=state->config->send_command(fe, 0x73, sizeof(b), b, &len, result); if(ret) return ret; if(len != 4) { printk(KERN_ERR "%s: unexpected reply\n", __func__); return -EIO; } switch(result[3]) { case 1: /* not tuned yet */ case 2: /* no signal/no lock*/ break; case 3: /* signal found and locked*/ *status = FE_HAS_SIGNAL | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_CARRIER | FE_HAS_LOCK; break; case 4: *status = FE_TIMEDOUT; break; default: pr_info("%s: returned unknown value: %d\n", __func__, result[3]); return -EIO; } return 0; } static 
int ttusbdecfe_dvbt_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) { struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv; u8 b[] = { 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0xff }; __be32 freq = htonl(p->frequency / 1000); memcpy(&b[4], &freq, sizeof (u32)); state->config->send_command(fe, 0x71, sizeof(b), b, NULL, NULL); return 0; } static int ttusbdecfe_dvbt_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings) { fesettings->min_delay_ms = 1500; /* Drift compensation makes no sense for DVB-T */ fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static int ttusbdecfe_dvbs_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) { struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv; u8 b[] = { 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; __be32 freq; __be32 sym_rate; __be32 band; __be32 lnb_voltage; freq = htonl(p->frequency + (state->hi_band ? LOF_HI : LOF_LO)); memcpy(&b[4], &freq, sizeof(u32)); sym_rate = htonl(p->u.qam.symbol_rate); memcpy(&b[12], &sym_rate, sizeof(u32)); band = htonl(state->hi_band ? 
LOF_HI : LOF_LO); memcpy(&b[24], &band, sizeof(u32)); lnb_voltage = htonl(state->voltage); memcpy(&b[28], &lnb_voltage, sizeof(u32)); state->config->send_command(fe, 0x71, sizeof(b), b, NULL, NULL); return 0; } static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *cmd) { struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv; u8 b[] = { 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; memcpy(&b[4], cmd->msg, cmd->msg_len); state->config->send_command(fe, 0x72, sizeof(b) - (6 - cmd->msg_len), b, NULL, NULL); return 0; } static int ttusbdecfe_dvbs_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone) { struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv; state->hi_band = (SEC_TONE_ON == tone); return 0; } static int ttusbdecfe_dvbs_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage) { struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv; switch (voltage) { case SEC_VOLTAGE_13: state->voltage = 13; break; case SEC_VOLTAGE_18: state->voltage = 18; break; default: return -EINVAL; } return 0; } static void ttusbdecfe_release(struct dvb_frontend* fe) { struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops ttusbdecfe_dvbt_ops; struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* config) { struct ttusbdecfe_state* state = NULL; /* allocate memory for the internal state */ state = kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL); if (state == NULL) return NULL; /* setup the state */ state->config = config; /* create dvb_frontend */ memcpy(&state->frontend.ops, &ttusbdecfe_dvbt_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; } static struct dvb_frontend_ops ttusbdecfe_dvbs_ops; struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* 
config) { struct ttusbdecfe_state* state = NULL; /* allocate memory for the internal state */ state = kmalloc(sizeof(struct ttusbdecfe_state), GFP_KERNEL); if (state == NULL) return NULL; /* setup the state */ state->config = config; state->voltage = 0; state->hi_band = 0; /* create dvb_frontend */ memcpy(&state->frontend.ops, &ttusbdecfe_dvbs_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; } static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = { .info = { .name = "TechnoTrend/Hauppauge DEC2000-t Frontend", .type = FE_OFDM, .frequency_min = 51000000, .frequency_max = 858000000, .frequency_stepsize = 62500, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO, }, .release = ttusbdecfe_release, .set_frontend = ttusbdecfe_dvbt_set_frontend, .get_tune_settings = ttusbdecfe_dvbt_get_tune_settings, .read_status = ttusbdecfe_dvbt_read_status, }; static struct dvb_frontend_ops ttusbdecfe_dvbs_ops = { .info = { .name = "TechnoTrend/Hauppauge DEC3000-s Frontend", .type = FE_QPSK, .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 125, .symbol_rate_min = 1000000, /* guessed */ .symbol_rate_max = 45000000, /* guessed */ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK }, .release = ttusbdecfe_release, .set_frontend = ttusbdecfe_dvbs_set_frontend, .read_status = ttusbdecfe_dvbs_read_status, .diseqc_send_master_cmd = ttusbdecfe_dvbs_diseqc_send_master_cmd, .set_voltage = ttusbdecfe_dvbs_set_voltage, .set_tone = ttusbdecfe_dvbs_set_tone, }; MODULE_DESCRIPTION("TTUSB DEC DVB-T/S Demodulator driver"); MODULE_AUTHOR("Alex Woods/Andrew de Quincey"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ttusbdecfe_dvbt_attach); 
EXPORT_SYMBOL(ttusbdecfe_dvbs_attach);
gpl-2.0
rajib4/android_kernel_xiaomi_cancro
drivers/net/arcnet/arcnet.c
7771
30420
/* * Linux ARCnet driver - device-independent routines * * Written 1997 by David Woodhouse. * Written 1994-1999 by Avery Pennarun. * Written 1999-2000 by Martin Mares <mj@ucw.cz>. * Derived from skeleton.c by Donald Becker. * * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) * for sponsoring the further development of this driver. * * ********************** * * The original copyright was as follows: * * skeleton.c Written 1993 by Donald Becker. * Copyright 1993 United States Government as represented by the * Director, National Security Agency. This software may only be used * and distributed according to the terms of the GNU General Public License as * modified by SRC, incorporated herein by reference. * * ********************** * * The change log is now in a file called ChangeLog in this directory. * * Sources: * - Crynwr arcnet.com/arcether.com packet drivers. * - arcnet.c v0.00 dated 1/1/94 and apparently by * Donald Becker - it didn't work :) * - skeleton.c v0.05 dated 11/16/93 by Donald Becker * (from Linux Kernel 1.1.45) * - RFC's 1201 and 1051 - re: TCP/IP over ARCnet * - The official ARCnet COM9026 data sheets (!) thanks to * Ken Cornetet <kcornete@nyx10.cs.du.edu> * - The official ARCnet COM20020 data sheets. * - Information on some more obscure ARCnet controller chips, thanks * to the nice people at SMSC. * - net/inet/eth.c (from kernel 1.1.50) for header-building info. 
* - Alternate Linux ARCnet source by V.Shergin <vsher@sao.stavropol.su> * - Textual information and more alternate source from Joachim Koenig * <jojo@repas.de> */ #define VERSION "arcnet: v3.94 BETA 2007/02/08 - by Avery Pennarun et al.\n" #include <linux/module.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <net/arp.h> #include <linux/init.h> #include <linux/arcdevice.h> #include <linux/jiffies.h> /* "do nothing" functions for protocol drivers */ static void null_rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length); static int null_build_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, uint8_t daddr); static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, int length, int bufnum); static void arcnet_rx(struct net_device *dev, int bufnum); /* * one ArcProto per possible proto ID. None of the elements of * arc_proto_map are allowed to be NULL; they will get set to * arc_proto_default instead. It also must not be NULL; if you would like * to set it to NULL, set it to &arc_proto_null instead. 
*/ struct ArcProto *arc_proto_map[256], *arc_proto_default, *arc_bcast_proto, *arc_raw_proto; static struct ArcProto arc_proto_null = { .suffix = '?', .mtu = XMTU, .is_ip = 0, .rx = null_rx, .build_header = null_build_header, .prepare_tx = null_prepare_tx, .continue_tx = NULL, .ack_tx = NULL }; /* Exported function prototypes */ int arcnet_debug = ARCNET_DEBUG; EXPORT_SYMBOL(arc_proto_map); EXPORT_SYMBOL(arc_proto_default); EXPORT_SYMBOL(arc_bcast_proto); EXPORT_SYMBOL(arc_raw_proto); EXPORT_SYMBOL(arcnet_unregister_proto); EXPORT_SYMBOL(arcnet_debug); EXPORT_SYMBOL(alloc_arcdev); EXPORT_SYMBOL(arcnet_interrupt); EXPORT_SYMBOL(arcnet_open); EXPORT_SYMBOL(arcnet_close); EXPORT_SYMBOL(arcnet_send_packet); EXPORT_SYMBOL(arcnet_timeout); /* Internal function prototypes */ static int arcnet_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); static int arcnet_rebuild_header(struct sk_buff *skb); static int go_tx(struct net_device *dev); static int debug = ARCNET_DEBUG; module_param(debug, int, 0); MODULE_LICENSE("GPL"); static int __init arcnet_init(void) { int count; arcnet_debug = debug; printk("arcnet loaded.\n"); #ifdef ALPHA_WARNING BUGLVL(D_EXTRA) { printk("arcnet: ***\n" "arcnet: * Read arcnet.txt for important release notes!\n" "arcnet: *\n" "arcnet: * This is an ALPHA version! 
(Last stable release: v3.02) E-mail\n" "arcnet: * me if you have any questions, comments, or bug reports.\n" "arcnet: ***\n"); } #endif /* initialize the protocol map */ arc_raw_proto = arc_proto_default = arc_bcast_proto = &arc_proto_null; for (count = 0; count < 256; count++) arc_proto_map[count] = arc_proto_default; BUGLVL(D_DURING) printk("arcnet: struct sizes: %Zd %Zd %Zd %Zd %Zd\n", sizeof(struct arc_hardware), sizeof(struct arc_rfc1201), sizeof(struct arc_rfc1051), sizeof(struct arc_eth_encap), sizeof(struct archdr)); return 0; } static void __exit arcnet_exit(void) { } module_init(arcnet_init); module_exit(arcnet_exit); /* * Dump the contents of an sk_buff */ #if ARCNET_DEBUG_MAX & D_SKB void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc) { char hdr[32]; /* dump the packet */ snprintf(hdr, sizeof(hdr), "%6s:%s skb->data:", dev->name, desc); print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, true); } EXPORT_SYMBOL(arcnet_dump_skb); #endif /* * Dump the contents of an ARCnet buffer */ #if (ARCNET_DEBUG_MAX & (D_RX | D_TX)) static void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, int take_arcnet_lock) { struct arcnet_local *lp = netdev_priv(dev); int i, length; unsigned long flags = 0; static uint8_t buf[512]; char hdr[32]; /* hw.copy_from_card expects IRQ context so take the IRQ lock to keep it single threaded */ if(take_arcnet_lock) spin_lock_irqsave(&lp->lock, flags); lp->hw.copy_from_card(dev, bufnum, 0, buf, 512); if(take_arcnet_lock) spin_unlock_irqrestore(&lp->lock, flags); /* if the offset[0] byte is nonzero, this is a 256-byte packet */ length = (buf[2] ? 
256 : 512); /* dump the packet */ snprintf(hdr, sizeof(hdr), "%6s:%s packet dump:", dev->name, desc); print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET, 16, 1, buf, length, true); } #else #define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) do { } while (0) #endif /* * Unregister a protocol driver from the arc_proto_map. Protocol drivers * are responsible for registering themselves, but the unregister routine * is pretty generic so we'll do it here. */ void arcnet_unregister_proto(struct ArcProto *proto) { int count; if (arc_proto_default == proto) arc_proto_default = &arc_proto_null; if (arc_bcast_proto == proto) arc_bcast_proto = arc_proto_default; if (arc_raw_proto == proto) arc_raw_proto = arc_proto_default; for (count = 0; count < 256; count++) { if (arc_proto_map[count] == proto) arc_proto_map[count] = arc_proto_default; } } /* * Add a buffer to the queue. Only the interrupt handler is allowed to do * this, unless interrupts are disabled. * * Note: we don't check for a full queue, since there aren't enough buffers * to more than fill it. */ static void release_arcbuf(struct net_device *dev, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); int i; lp->buf_queue[lp->first_free_buf++] = bufnum; lp->first_free_buf %= 5; BUGLVL(D_DURING) { BUGMSG(D_DURING, "release_arcbuf: freed #%d; buffer queue is now: ", bufnum); for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5) BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]); BUGMSG2(D_DURING, "\n"); } } /* * Get a buffer from the queue. If this returns -1, there are no buffers * available. 
*/ static int get_arcbuf(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); int buf = -1, i; if (!atomic_dec_and_test(&lp->buf_lock)) { /* already in this function */ BUGMSG(D_NORMAL, "get_arcbuf: overlap (%d)!\n", lp->buf_lock.counter); } else { /* we can continue */ if (lp->next_buf >= 5) lp->next_buf -= 5; if (lp->next_buf == lp->first_free_buf) BUGMSG(D_NORMAL, "get_arcbuf: BUG: no buffers are available??\n"); else { buf = lp->buf_queue[lp->next_buf++]; lp->next_buf %= 5; } } BUGLVL(D_DURING) { BUGMSG(D_DURING, "get_arcbuf: got #%d; buffer queue is now: ", buf); for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5) BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]); BUGMSG2(D_DURING, "\n"); } atomic_inc(&lp->buf_lock); return buf; } static int choose_mtu(void) { int count, mtu = 65535; /* choose the smallest MTU of all available encaps */ for (count = 0; count < 256; count++) { if (arc_proto_map[count] != &arc_proto_null && arc_proto_map[count]->mtu < mtu) { mtu = arc_proto_map[count]->mtu; } } return mtu == 65535 ? XMTU : mtu; } static const struct header_ops arcnet_header_ops = { .create = arcnet_header, .rebuild = arcnet_rebuild_header, }; static const struct net_device_ops arcnet_netdev_ops = { .ndo_open = arcnet_open, .ndo_stop = arcnet_close, .ndo_start_xmit = arcnet_send_packet, .ndo_tx_timeout = arcnet_timeout, }; /* Setup a struct device for ARCnet. */ static void arcdev_setup(struct net_device *dev) { dev->type = ARPHRD_ARCNET; dev->netdev_ops = &arcnet_netdev_ops; dev->header_ops = &arcnet_header_ops; dev->hard_header_len = sizeof(struct archdr); dev->mtu = choose_mtu(); dev->addr_len = ARCNET_ALEN; dev->tx_queue_len = 100; dev->broadcast[0] = 0x00; /* for us, broadcasts are address 0 */ dev->watchdog_timeo = TX_TIMEOUT; /* New-style flags. */ dev->flags = IFF_BROADCAST; } struct net_device *alloc_arcdev(const char *name) { struct net_device *dev; dev = alloc_netdev(sizeof(struct arcnet_local), name && *name ? 
name : "arc%d", arcdev_setup); if(dev) { struct arcnet_local *lp = netdev_priv(dev); spin_lock_init(&lp->lock); } return dev; } /* * Open/initialize the board. This is called sometime after booting when * the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even registers * that "should" only need to be set once at boot, so that there is * non-reboot way to recover if something goes wrong. */ int arcnet_open(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); int count, newmtu, error; BUGMSG(D_INIT,"opened."); if (!try_module_get(lp->hw.owner)) return -ENODEV; BUGLVL(D_PROTO) { BUGMSG(D_PROTO, "protocol map (default is '%c'): ", arc_proto_default->suffix); for (count = 0; count < 256; count++) BUGMSG2(D_PROTO, "%c", arc_proto_map[count]->suffix); BUGMSG2(D_PROTO, "\n"); } BUGMSG(D_INIT, "arcnet_open: resetting card.\n"); /* try to put the card in a defined state - if it fails the first * time, actually reset it. */ error = -ENODEV; if (ARCRESET(0) && ARCRESET(1)) goto out_module_put; newmtu = choose_mtu(); if (newmtu < dev->mtu) dev->mtu = newmtu; BUGMSG(D_INIT, "arcnet_open: mtu: %d.\n", dev->mtu); /* autodetect the encapsulation for each host. */ memset(lp->default_proto, 0, sizeof(lp->default_proto)); /* the broadcast address is special - use the 'bcast' protocol */ for (count = 0; count < 256; count++) { if (arc_proto_map[count] == arc_bcast_proto) { lp->default_proto[0] = count; break; } } /* initialize buffers */ atomic_set(&lp->buf_lock, 1); lp->next_buf = lp->first_free_buf = 0; release_arcbuf(dev, 0); release_arcbuf(dev, 1); release_arcbuf(dev, 2); release_arcbuf(dev, 3); lp->cur_tx = lp->next_tx = -1; lp->cur_rx = -1; lp->rfc1201.sequence = 1; /* bring up the hardware driver */ if (lp->hw.open) lp->hw.open(dev); if (dev->dev_addr[0] == 0) BUGMSG(D_NORMAL, "WARNING! Station address 00 is reserved " "for broadcasts!\n"); else if (dev->dev_addr[0] == 255) BUGMSG(D_NORMAL, "WARNING! 
Station address FF may confuse " "DOS networking programs!\n"); BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); if (ASTATUS() & RESETflag) { BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); ACOMMAND(CFLAGScmd | RESETclear); } BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); /* make sure we're ready to receive IRQ's. */ AINTMASK(0); udelay(1); /* give it time to set the mask before * we reset it again. (may not even be * necessary) */ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); lp->intmask = NORXflag | RECONflag; AINTMASK(lp->intmask); BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); netif_start_queue(dev); return 0; out_module_put: module_put(lp->hw.owner); return error; } /* The inverse routine to arcnet_open - shuts down the card. */ int arcnet_close(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); netif_stop_queue(dev); /* flush TX and disable RX */ AINTMASK(0); ACOMMAND(NOTXcmd); /* stop transmit */ ACOMMAND(NORXcmd); /* disable receive */ mdelay(1); /* shut down the card */ lp->hw.close(dev); module_put(lp->hw.owner); return 0; } static int arcnet_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { const struct arcnet_local *lp = netdev_priv(dev); uint8_t _daddr, proto_num; struct ArcProto *proto; BUGMSG(D_DURING, "create header from %d to %d; protocol %d (%Xh); size %u.\n", saddr ? *(uint8_t *) saddr : -1, daddr ? *(uint8_t *) daddr : -1, type, type, len); if (skb->len!=0 && len != skb->len) BUGMSG(D_NORMAL, "arcnet_header: Yikes! skb->len(%d) != len(%d)!\n", skb->len, len); /* Type is host order - ? */ if(type == ETH_P_ARCNET) { proto = arc_raw_proto; BUGMSG(D_DEBUG, "arc_raw_proto used. proto='%c'\n",proto->suffix); _daddr = daddr ? *(uint8_t *) daddr : 0; } else if (!daddr) { /* * if the dest addr isn't provided, we can't choose an encapsulation! * Store the packet type (eg. 
ETH_P_IP) for now, and we'll push on a * real header when we do rebuild_header. */ *(uint16_t *) skb_push(skb, 2) = type; /* * XXX: Why not use skb->mac_len? */ if (skb->network_header - skb->mac_header != 2) BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n", (int)(skb->network_header - skb->mac_header)); return -2; /* return error -- can't transmit yet! */ } else { /* otherwise, we can just add the header as usual. */ _daddr = *(uint8_t *) daddr; proto_num = lp->default_proto[_daddr]; proto = arc_proto_map[proto_num]; BUGMSG(D_DURING, "building header for %02Xh using protocol '%c'\n", proto_num, proto->suffix); if (proto == &arc_proto_null && arc_bcast_proto != proto) { BUGMSG(D_DURING, "actually, let's use '%c' instead.\n", arc_bcast_proto->suffix); proto = arc_bcast_proto; } } return proto->build_header(skb, dev, type, _daddr); } /* * Rebuild the ARCnet hard header. This is called after an ARP (or in the * future other address resolution) has completed on this sk_buff. We now * let ARP fill in the destination field. */ static int arcnet_rebuild_header(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct arcnet_local *lp = netdev_priv(dev); int status = 0; /* default is failure */ unsigned short type; uint8_t daddr=0; struct ArcProto *proto; /* * XXX: Why not use skb->mac_len? */ if (skb->network_header - skb->mac_header != 2) { BUGMSG(D_NORMAL, "rebuild_header: shouldn't be here! (hdrsize=%d)\n", (int)(skb->network_header - skb->mac_header)); return 0; } type = *(uint16_t *) skb_pull(skb, 2); BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type); if (type == ETH_P_IP) { #ifdef CONFIG_INET BUGMSG(D_DURING, "rebuild header for ethernet protocol %Xh\n", type); status = arp_find(&daddr, skb) ? 
1 : 0; BUGMSG(D_DURING, " rebuilt: dest is %d; protocol %Xh\n", daddr, type); #endif } else { BUGMSG(D_NORMAL, "I don't understand ethernet protocol %Xh addresses!\n", type); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; } /* if we couldn't resolve the address... give up. */ if (!status) return 0; /* add the _real_ header this time! */ proto = arc_proto_map[lp->default_proto[daddr]]; proto->build_header(skb, dev, type, daddr); return 1; /* success */ } /* Called by the kernel in order to transmit a packet. */ netdev_tx_t arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); struct archdr *pkt; struct arc_rfc1201 *soft; struct ArcProto *proto; int txbuf; unsigned long flags; int freeskb, retval; BUGMSG(D_DURING, "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n", ASTATUS(), lp->cur_tx, lp->next_tx, skb->len,skb->protocol); pkt = (struct archdr *) skb->data; soft = &pkt->soft.rfc1201; proto = arc_proto_map[soft->proto]; BUGMSG(D_SKB_SIZE, "skb: transmitting %d bytes to %02X\n", skb->len, pkt->hard.dest); BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "tx"); /* fits in one packet? */ if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) { BUGMSG(D_NORMAL, "fixme: packet too large: compensating badly!\n"); dev_kfree_skb(skb); return NETDEV_TX_OK; /* don't try again */ } /* We're busy transmitting a packet... */ netif_stop_queue(dev); spin_lock_irqsave(&lp->lock, flags); AINTMASK(0); if(lp->next_tx == -1) txbuf = get_arcbuf(dev); else { txbuf = -1; } if (txbuf != -1) { if (proto->prepare_tx(dev, pkt, skb->len, txbuf) && !proto->ack_tx) { /* done right away and we don't want to acknowledge the package later - forget about it now */ dev->stats.tx_bytes += skb->len; freeskb = 1; } else { /* do it the 'split' way */ lp->outgoing.proto = proto; lp->outgoing.skb = skb; lp->outgoing.pkt = pkt; freeskb = 0; if (proto->continue_tx && proto->continue_tx(dev, txbuf)) { BUGMSG(D_NORMAL, "bug! 
continue_tx finished the first time! " "(proto='%c')\n", proto->suffix); } } retval = NETDEV_TX_OK; lp->next_tx = txbuf; } else { retval = NETDEV_TX_BUSY; freeskb = 0; } BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS()); /* make sure we didn't ignore a TX IRQ while we were in here */ AINTMASK(0); BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); lp->intmask |= TXFREEflag|EXCNAKflag; AINTMASK(lp->intmask); BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS()); spin_unlock_irqrestore(&lp->lock, flags); if (freeskb) { dev_kfree_skb(skb); } return retval; /* no need to try again */ } /* * Actually start transmitting a packet that was loaded into a buffer * by prepare_tx. This should _only_ be called by the interrupt handler. */ static int go_tx(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); BUGMSG(D_DURING, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n", ASTATUS(), lp->intmask, lp->next_tx, lp->cur_tx); if (lp->cur_tx != -1 || lp->next_tx == -1) return 0; BUGLVL(D_TX) arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0); lp->cur_tx = lp->next_tx; lp->next_tx = -1; /* start sending */ ACOMMAND(TXcmd | (lp->cur_tx << 3)); dev->stats.tx_packets++; lp->lasttrans_dest = lp->lastload_dest; lp->lastload_dest = 0; lp->excnak_pending = 0; lp->intmask |= TXFREEflag|EXCNAKflag; return 1; } /* Called by the kernel when transmit times out */ void arcnet_timeout(struct net_device *dev) { unsigned long flags; struct arcnet_local *lp = netdev_priv(dev); int status = ASTATUS(); char *msg; spin_lock_irqsave(&lp->lock, flags); if (status & TXFREEflag) { /* transmit _DID_ finish */ msg = " - missed IRQ?"; } else { msg = ""; dev->stats.tx_aborted_errors++; lp->timed_out = 1; ACOMMAND(NOTXcmd | (lp->cur_tx << 3)); } dev->stats.tx_errors++; /* make sure we didn't miss a TX or a EXC NAK IRQ */ AINTMASK(0); lp->intmask |= TXFREEflag|EXCNAKflag; AINTMASK(lp->intmask); 
spin_unlock_irqrestore(&lp->lock, flags); if (time_after(jiffies, lp->last_timeout + 10*HZ)) { BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n", msg, status, lp->intmask, lp->lasttrans_dest); lp->last_timeout = jiffies; } if (lp->cur_tx == -1) netif_wake_queue(dev); } /* * The typical workload of the driver: Handle the network interface * interrupts. Establish which device needs attention, and call the correct * chipset interrupt handler. */ irqreturn_t arcnet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct arcnet_local *lp; int recbuf, status, diagstatus, didsomething, boguscount; int retval = IRQ_NONE; BUGMSG(D_DURING, "\n"); BUGMSG(D_DURING, "in arcnet_interrupt\n"); lp = netdev_priv(dev); BUG_ON(!lp); spin_lock(&lp->lock); /* * RESET flag was enabled - if device is not running, we must clear it right * away (but nothing else). */ if (!netif_running(dev)) { if (ASTATUS() & RESETflag) ACOMMAND(CFLAGScmd | RESETclear); AINTMASK(0); spin_unlock(&lp->lock); return IRQ_HANDLED; } BUGMSG(D_DURING, "in arcnet_inthandler (status=%Xh, intmask=%Xh)\n", ASTATUS(), lp->intmask); boguscount = 5; do { status = ASTATUS(); diagstatus = (status >> 8) & 0xFF; BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n", __FILE__,__LINE__,__func__,status); didsomething = 0; /* * RESET flag was enabled - card is resetting and if RX is * disabled, it's NOT because we just got a packet. * * The card is in an undefined state. Clear it out and start over. */ if (status & RESETflag) { BUGMSG(D_NORMAL, "spurious reset (status=%Xh)\n", status); arcnet_close(dev); arcnet_open(dev); /* get out of the interrupt handler! */ break; } /* * RX is inhibited - we must have received something. Prepare to * receive into the next buffer. * * We don't actually copy the received packet from the card until * after the transmit handler runs (and possibly launches the next * tx); this should improve latency slightly if we get both types * of interrupts at once. 
*/ recbuf = -1; if (status & lp->intmask & NORXflag) { recbuf = lp->cur_rx; BUGMSG(D_DURING, "Buffer #%d: receive irq (status=%Xh)\n", recbuf, status); lp->cur_rx = get_arcbuf(dev); if (lp->cur_rx != -1) { BUGMSG(D_DURING, "enabling receive to buffer #%d\n", lp->cur_rx); ACOMMAND(RXcmd | (lp->cur_rx << 3) | RXbcasts); } didsomething++; } if((diagstatus & EXCNAKflag)) { BUGMSG(D_DURING, "EXCNAK IRQ (diagstat=%Xh)\n", diagstatus); ACOMMAND(NOTXcmd); /* disable transmit */ lp->excnak_pending = 1; ACOMMAND(EXCNAKclear); lp->intmask &= ~(EXCNAKflag); didsomething++; } /* a transmit finished, and we're interested in it. */ if ((status & lp->intmask & TXFREEflag) || lp->timed_out) { lp->intmask &= ~(TXFREEflag|EXCNAKflag); BUGMSG(D_DURING, "TX IRQ (stat=%Xh)\n", status); if (lp->cur_tx != -1 && !lp->timed_out) { if(!(status & TXACKflag)) { if (lp->lasttrans_dest != 0) { BUGMSG(D_EXTRA, "transmit was not acknowledged! " "(status=%Xh, dest=%02Xh)\n", status, lp->lasttrans_dest); dev->stats.tx_errors++; dev->stats.tx_carrier_errors++; } else { BUGMSG(D_DURING, "broadcast was not acknowledged; that's normal " "(status=%Xh, dest=%02Xh)\n", status, lp->lasttrans_dest); } } if (lp->outgoing.proto && lp->outgoing.proto->ack_tx) { int ackstatus; if(status & TXACKflag) ackstatus=2; else if(lp->excnak_pending) ackstatus=1; else ackstatus=0; lp->outgoing.proto ->ack_tx(dev, ackstatus); } } if (lp->cur_tx != -1) release_arcbuf(dev, lp->cur_tx); lp->cur_tx = -1; lp->timed_out = 0; didsomething++; /* send another packet if there is one */ go_tx(dev); /* continue a split packet, if any */ if (lp->outgoing.proto && lp->outgoing.proto->continue_tx) { int txbuf = get_arcbuf(dev); if (txbuf != -1) { if (lp->outgoing.proto->continue_tx(dev, txbuf)) { /* that was the last segment */ dev->stats.tx_bytes += lp->outgoing.skb->len; if(!lp->outgoing.proto->ack_tx) { dev_kfree_skb_irq(lp->outgoing.skb); lp->outgoing.proto = NULL; } } lp->next_tx = txbuf; } } /* inform upper layers of idleness, if 
necessary */ if (lp->cur_tx == -1) netif_wake_queue(dev); } /* now process the received packet, if any */ if (recbuf != -1) { BUGLVL(D_RX) arcnet_dump_packet(dev, recbuf, "rx irq", 0); arcnet_rx(dev, recbuf); release_arcbuf(dev, recbuf); didsomething++; } if (status & lp->intmask & RECONflag) { ACOMMAND(CFLAGScmd | CONFIGclear); dev->stats.tx_carrier_errors++; BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n", status); /* MYRECON bit is at bit 7 of diagstatus */ if(diagstatus & 0x80) BUGMSG(D_RECON,"Put out that recon myself\n"); /* is the RECON info empty or old? */ if (!lp->first_recon || !lp->last_recon || time_after(jiffies, lp->last_recon + HZ * 10)) { if (lp->network_down) BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n"); lp->first_recon = lp->last_recon = jiffies; lp->num_recons = lp->network_down = 0; BUGMSG(D_DURING, "recon: clearing counters.\n"); } else { /* add to current RECON counter */ lp->last_recon = jiffies; lp->num_recons++; BUGMSG(D_DURING, "recon: counter=%d, time=%lds, net=%d\n", lp->num_recons, (lp->last_recon - lp->first_recon) / HZ, lp->network_down); /* if network is marked up; * and first_recon and last_recon are 60+ apart; * and the average no. of recons counted is * > RECON_THRESHOLD/min; * then print a warning message. */ if (!lp->network_down && (lp->last_recon - lp->first_recon) <= HZ * 60 && lp->num_recons >= RECON_THRESHOLD) { lp->network_down = 1; BUGMSG(D_NORMAL, "many reconfigurations detected: cabling problem?\n"); } else if (!lp->network_down && lp->last_recon - lp->first_recon > HZ * 60) { /* reset counters if we've gone for over a minute. 
*/ lp->first_recon = lp->last_recon; lp->num_recons = 1; } } } else if (lp->network_down && time_after(jiffies, lp->last_recon + HZ * 10)) { if (lp->network_down) BUGMSG(D_NORMAL, "cabling restored?\n"); lp->first_recon = lp->last_recon = 0; lp->num_recons = lp->network_down = 0; BUGMSG(D_DURING, "not recon: clearing counters anyway.\n"); } if(didsomething) { retval |= IRQ_HANDLED; } } while (--boguscount && didsomething); BUGMSG(D_DURING, "arcnet_interrupt complete (status=%Xh, count=%d)\n", ASTATUS(), boguscount); BUGMSG(D_DURING, "\n"); AINTMASK(0); udelay(1); AINTMASK(lp->intmask); spin_unlock(&lp->lock); return retval; } /* * This is a generic packet receiver that calls arcnet??_rx depending on the * protocol ID found. */ static void arcnet_rx(struct net_device *dev, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); struct archdr pkt; struct arc_rfc1201 *soft; int length, ofs; soft = &pkt.soft.rfc1201; lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE)); if (pkt.hard.offset[0]) { ofs = pkt.hard.offset[0]; length = 256 - ofs; } else { ofs = pkt.hard.offset[1]; length = 512 - ofs; } /* get the full header, if possible */ if (sizeof(pkt.soft) <= length) lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft)); else { memset(&pkt.soft, 0, sizeof(pkt.soft)); lp->hw.copy_from_card(dev, bufnum, ofs, soft, length); } BUGMSG(D_DURING, "Buffer #%d: received packet from %02Xh to %02Xh " "(%d+4 bytes)\n", bufnum, pkt.hard.source, pkt.hard.dest, length); dev->stats.rx_packets++; dev->stats.rx_bytes += length + ARC_HDR_SIZE; /* call the right receiver for the protocol */ if (arc_proto_map[soft->proto]->is_ip) { BUGLVL(D_PROTO) { struct ArcProto *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]], *newp = arc_proto_map[soft->proto]; if (oldp != newp) { BUGMSG(D_PROTO, "got protocol %02Xh; encap for host %02Xh is now '%c'" " (was '%c')\n", soft->proto, pkt.hard.source, newp->suffix, oldp->suffix); } } /* broadcasts will always be done with 
the last-used encap. */ lp->default_proto[0] = soft->proto; /* in striking contrast, the following isn't a hack. */ lp->default_proto[pkt.hard.source] = soft->proto; } /* call the protocol-specific receiver. */ arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length); } static void null_rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length) { BUGMSG(D_PROTO, "rx: don't know how to deal with proto %02Xh from host %02Xh.\n", pkthdr->soft.rfc1201.proto, pkthdr->hard.source); } static int null_build_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, uint8_t daddr) { struct arcnet_local *lp = netdev_priv(dev); BUGMSG(D_PROTO, "tx: can't build header for encap %02Xh; load a protocol driver.\n", lp->default_proto[daddr]); /* always fails */ return 0; } /* the "do nothing" prepare_tx function warns that there's nothing to do. */ static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, int length, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); struct arc_hardware newpkt; BUGMSG(D_PROTO, "tx: no encap for this host; load a protocol driver.\n"); /* send a packet to myself -- will never get received, of course */ newpkt.source = newpkt.dest = dev->dev_addr[0]; /* only one byte of actual data (and it's random) */ newpkt.offset[0] = 0xFF; lp->hw.copy_to_card(dev, bufnum, 0, &newpkt, ARC_HDR_SIZE); return 1; /* done */ }
gpl-2.0
NoelMacwan/SXDNickiDS
drivers/staging/tidspbridge/core/sync.c
8283
3200
/* * sync.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Synchronization services. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* ----------------------------------- Host OS */ #include <dspbridge/host_os.h> /* ----------------------------------- This */ #include <dspbridge/sync.h> #include <dspbridge/ntfy.h> DEFINE_SPINLOCK(sync_lock); /** * sync_set_event() - set or signal and specified event * @event: Event to be set.. * * set the @event, if there is an thread waiting for the event * it will be waken up, this function only wakes one thread. */ void sync_set_event(struct sync_object *event) { spin_lock_bh(&sync_lock); complete(&event->comp); if (event->multi_comp) complete(event->multi_comp); spin_unlock_bh(&sync_lock); } /** * sync_wait_on_multiple_events() - waits for multiple events to be set. * @events: Array of events to wait for them. * @count: number of elements of the array. * @timeout timeout on waiting for the evetns. * @pu_index index of the event set. * * This functios will wait until any of the array element is set or until * timeout. In case of success the function will return 0 and * @pu_index will store the index of the array element set or in case * of timeout the function will return -ETIME or in case of * interrupting by a signal it will return -EPERM. 
*/ int sync_wait_on_multiple_events(struct sync_object **events, unsigned count, unsigned timeout, unsigned *index) { unsigned i; int status = -EPERM; struct completion m_comp; init_completion(&m_comp); if (SYNC_INFINITE == timeout) timeout = MAX_SCHEDULE_TIMEOUT; spin_lock_bh(&sync_lock); for (i = 0; i < count; i++) { if (completion_done(&events[i]->comp)) { INIT_COMPLETION(events[i]->comp); *index = i; spin_unlock_bh(&sync_lock); status = 0; goto func_end; } } for (i = 0; i < count; i++) events[i]->multi_comp = &m_comp; spin_unlock_bh(&sync_lock); if (!wait_for_completion_interruptible_timeout(&m_comp, msecs_to_jiffies(timeout))) status = -ETIME; spin_lock_bh(&sync_lock); for (i = 0; i < count; i++) { if (completion_done(&events[i]->comp)) { INIT_COMPLETION(events[i]->comp); *index = i; status = 0; } events[i]->multi_comp = NULL; } spin_unlock_bh(&sync_lock); func_end: return status; } /** * dsp_notifier_event() - callback function to nofity events * @this: pointer to itself struct notifier_block * @event: event to be notified. * @data: Currently not used. * */ int dsp_notifier_event(struct notifier_block *this, unsigned long event, void *data) { struct ntfy_event *ne = container_of(this, struct ntfy_event, noti_block); if (ne->event & event) sync_set_event(&ne->sync_obj); return NOTIFY_OK; }
gpl-2.0
ShikharArvind/android_kernel_xolo_q1100
drivers/video/s1d13xxxfb.c
8283
29103
/* drivers/video/s1d13xxxfb.c * * (c) 2004 Simtec Electronics * (c) 2005 Thibaut VARENE <varenet@parisc-linux.org> * (c) 2009 Kristoffer Ericson <kristoffer.ericson@gmail.com> * * Driver for Epson S1D13xxx series framebuffer chips * * Adapted from * linux/drivers/video/skeletonfb.c * linux/drivers/video/epson1355fb.c * linux/drivers/video/epson/s1d13xxxfb.c (2.4 driver by Epson) * * TODO: - handle dual screen display (CRT and LCD at the same time). * - check_var(), mode change, etc. * - probably not SMP safe :) * - support all bitblt operations on all cards * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/fb.h> #include <linux/spinlock_types.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <asm/io.h> #include <video/s1d13xxxfb.h> #define PFX "s1d13xxxfb: " #define BLIT "s1d13xxxfb_bitblt: " /* * set this to enable debugging on general functions */ #if 0 #define dbg(fmt, args...) do { printk(KERN_INFO fmt, ## args); } while(0) #else #define dbg(fmt, args...) do { } while (0) #endif /* * set this to enable debugging on 2D acceleration */ #if 0 #define dbg_blit(fmt, args...) do { printk(KERN_INFO BLIT fmt, ## args); } while (0) #else #define dbg_blit(fmt, args...) 
do { } while (0) #endif /* * we make sure only one bitblt operation is running */ static DEFINE_SPINLOCK(s1d13xxxfb_bitblt_lock); /* * list of card production ids */ static const int s1d13xxxfb_prod_ids[] = { S1D13505_PROD_ID, S1D13506_PROD_ID, S1D13806_PROD_ID, }; /* * List of card strings */ static const char *s1d13xxxfb_prod_names[] = { "S1D13505", "S1D13506", "S1D13806", }; /* * here we define the default struct fb_fix_screeninfo */ static struct fb_fix_screeninfo __devinitdata s1d13xxxfb_fix = { .id = S1D_FBID, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .xpanstep = 0, .ypanstep = 1, .ywrapstep = 0, .accel = FB_ACCEL_NONE, }; static inline u8 s1d13xxxfb_readreg(struct s1d13xxxfb_par *par, u16 regno) { #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_OPSPUT) || defined(CONFIG_PLAT_MAPPI3) regno=((regno & 1) ? (regno & ~1L) : (regno + 1)); #endif return readb(par->regs + regno); } static inline void s1d13xxxfb_writereg(struct s1d13xxxfb_par *par, u16 regno, u8 value) { #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_OPSPUT) || defined(CONFIG_PLAT_MAPPI3) regno=((regno & 1) ? 
(regno & ~1L) : (regno + 1)); #endif writeb(value, par->regs + regno); } static inline void s1d13xxxfb_runinit(struct s1d13xxxfb_par *par, const struct s1d13xxxfb_regval *initregs, const unsigned int size) { int i; for (i = 0; i < size; i++) { if ((initregs[i].addr == S1DREG_DELAYOFF) || (initregs[i].addr == S1DREG_DELAYON)) mdelay((int)initregs[i].value); else { s1d13xxxfb_writereg(par, initregs[i].addr, initregs[i].value); } } /* make sure the hardware can cope with us */ mdelay(1); } static inline void lcd_enable(struct s1d13xxxfb_par *par, int enable) { u8 mode = s1d13xxxfb_readreg(par, S1DREG_COM_DISP_MODE); if (enable) mode |= 0x01; else mode &= ~0x01; s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode); } static inline void crt_enable(struct s1d13xxxfb_par *par, int enable) { u8 mode = s1d13xxxfb_readreg(par, S1DREG_COM_DISP_MODE); if (enable) mode |= 0x02; else mode &= ~0x02; s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, mode); } /************************************************************* framebuffer control functions *************************************************************/ static inline void s1d13xxxfb_setup_pseudocolour(struct fb_info *info) { info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->var.red.length = 4; info->var.green.length = 4; info->var.blue.length = 4; } static inline void s1d13xxxfb_setup_truecolour(struct fb_info *info) { info->fix.visual = FB_VISUAL_TRUECOLOR; info->var.bits_per_pixel = 16; info->var.red.length = 5; info->var.red.offset = 11; info->var.green.length = 6; info->var.green.offset = 5; info->var.blue.length = 5; info->var.blue.offset = 0; } /** * s1d13xxxfb_set_par - Alters the hardware state. * @info: frame buffer structure * * Using the fb_var_screeninfo in fb_info we set the depth of the * framebuffer. This function alters the par AND the * fb_fix_screeninfo stored in fb_info. It doesn't not alter var in * fb_info since we are using that data. 
This means we depend on the * data in var inside fb_info to be supported by the hardware. * xxxfb_check_var is always called before xxxfb_set_par to ensure this. * * XXX TODO: write proper s1d13xxxfb_check_var(), without which that * function is quite useless. */ static int s1d13xxxfb_set_par(struct fb_info *info) { struct s1d13xxxfb_par *s1dfb = info->par; unsigned int val; dbg("s1d13xxxfb_set_par: bpp=%d\n", info->var.bits_per_pixel); if ((s1dfb->display & 0x01)) /* LCD */ val = s1d13xxxfb_readreg(s1dfb, S1DREG_LCD_DISP_MODE); /* read colour control */ else /* CRT */ val = s1d13xxxfb_readreg(s1dfb, S1DREG_CRT_DISP_MODE); /* read colour control */ val &= ~0x07; switch (info->var.bits_per_pixel) { case 4: dbg("pseudo colour 4\n"); s1d13xxxfb_setup_pseudocolour(info); val |= 2; break; case 8: dbg("pseudo colour 8\n"); s1d13xxxfb_setup_pseudocolour(info); val |= 3; break; case 16: dbg("true colour\n"); s1d13xxxfb_setup_truecolour(info); val |= 5; break; default: dbg("bpp not supported!\n"); return -EINVAL; } dbg("writing %02x to display mode register\n", val); if ((s1dfb->display & 0x01)) /* LCD */ s1d13xxxfb_writereg(s1dfb, S1DREG_LCD_DISP_MODE, val); else /* CRT */ s1d13xxxfb_writereg(s1dfb, S1DREG_CRT_DISP_MODE, val); info->fix.line_length = info->var.xres * info->var.bits_per_pixel; info->fix.line_length /= 8; dbg("setting line_length to %d\n", info->fix.line_length); dbg("done setup\n"); return 0; } /** * s1d13xxxfb_setcolreg - sets a color register. * @regno: Which register in the CLUT we are programming * @red: The red value which can be up to 16 bits wide * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. * @info: frame buffer info structure * * Returns negative errno on error, or zero on success. 
*/ static int s1d13xxxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct s1d13xxxfb_par *s1dfb = info->par; unsigned int pseudo_val; if (regno >= S1D_PALETTE_SIZE) return -EINVAL; dbg("s1d13xxxfb_setcolreg: %d: rgb=%d,%d,%d, tr=%d\n", regno, red, green, blue, transp); if (info->var.grayscale) red = green = blue = (19595*red + 38470*green + 7471*blue) >> 16; switch (info->fix.visual) { case FB_VISUAL_TRUECOLOR: if (regno >= 16) return -EINVAL; /* deal with creating pseudo-palette entries */ pseudo_val = (red >> 11) << info->var.red.offset; pseudo_val |= (green >> 10) << info->var.green.offset; pseudo_val |= (blue >> 11) << info->var.blue.offset; dbg("s1d13xxxfb_setcolreg: pseudo %d, val %08x\n", regno, pseudo_val); #if defined(CONFIG_PLAT_MAPPI) ((u32 *)info->pseudo_palette)[regno] = cpu_to_le16(pseudo_val); #else ((u32 *)info->pseudo_palette)[regno] = pseudo_val; #endif break; case FB_VISUAL_PSEUDOCOLOR: s1d13xxxfb_writereg(s1dfb, S1DREG_LKUP_ADDR, regno); s1d13xxxfb_writereg(s1dfb, S1DREG_LKUP_DATA, red); s1d13xxxfb_writereg(s1dfb, S1DREG_LKUP_DATA, green); s1d13xxxfb_writereg(s1dfb, S1DREG_LKUP_DATA, blue); break; default: return -ENOSYS; } dbg("s1d13xxxfb_setcolreg: done\n"); return 0; } /** * s1d13xxxfb_blank - blanks the display. * @blank_mode: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer * * Blank the screen if blank_mode != 0, else unblank. Return 0 if * blanking succeeded, != 0 if un-/blanking failed due to e.g. a * video mode which doesn't support it. Implements VESA suspend * and powerdown modes on hardware that supports disabling hsync/vsync: * blank_mode == 2: suspend vsync * blank_mode == 3: suspend hsync * blank_mode == 4: powerdown * * Returns negative errno on error, or zero on success. 
*/ static int s1d13xxxfb_blank(int blank_mode, struct fb_info *info) { struct s1d13xxxfb_par *par = info->par; dbg("s1d13xxxfb_blank: blank=%d, info=%p\n", blank_mode, info); switch (blank_mode) { case FB_BLANK_UNBLANK: case FB_BLANK_NORMAL: if ((par->display & 0x01) != 0) lcd_enable(par, 1); if ((par->display & 0x02) != 0) crt_enable(par, 1); break; case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: break; case FB_BLANK_POWERDOWN: lcd_enable(par, 0); crt_enable(par, 0); break; default: return -EINVAL; } /* let fbcon do a soft blank for us */ return ((blank_mode == FB_BLANK_NORMAL) ? 1 : 0); } /** * s1d13xxxfb_pan_display - Pans the display. * @var: frame buffer variable screen structure * @info: frame buffer structure that represents a single frame buffer * * Pan (or wrap, depending on the `vmode' field) the display using the * `yoffset' field of the `var' structure (`xoffset' not yet supported). * If the values don't fit, return -EINVAL. * * Returns negative errno on error, or zero on success. */ static int s1d13xxxfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct s1d13xxxfb_par *par = info->par; u32 start; if (var->xoffset != 0) /* not yet ... 
*/ return -EINVAL; if (var->yoffset + info->var.yres > info->var.yres_virtual) return -EINVAL; start = (info->fix.line_length >> 1) * var->yoffset; if ((par->display & 0x01)) { /* LCD */ s1d13xxxfb_writereg(par, S1DREG_LCD_DISP_START0, (start & 0xff)); s1d13xxxfb_writereg(par, S1DREG_LCD_DISP_START1, ((start >> 8) & 0xff)); s1d13xxxfb_writereg(par, S1DREG_LCD_DISP_START2, ((start >> 16) & 0x0f)); } else { /* CRT */ s1d13xxxfb_writereg(par, S1DREG_CRT_DISP_START0, (start & 0xff)); s1d13xxxfb_writereg(par, S1DREG_CRT_DISP_START1, ((start >> 8) & 0xff)); s1d13xxxfb_writereg(par, S1DREG_CRT_DISP_START2, ((start >> 16) & 0x0f)); } return 0; } /************************************************************ functions to handle bitblt acceleration ************************************************************/ /** * bltbit_wait_bitclear - waits for change in register value * @info : frambuffer structure * @bit : value currently in register * @timeout : ... * * waits until value changes FROM bit * */ static u8 bltbit_wait_bitclear(struct fb_info *info, u8 bit, int timeout) { while (s1d13xxxfb_readreg(info->par, S1DREG_BBLT_CTL0) & bit) { udelay(10); if (!--timeout) { dbg_blit("wait_bitclear timeout\n"); break; } } return timeout; } /* * s1d13xxxfb_bitblt_copyarea - accelerated copyarea function * @info : framebuffer structure * @area : fb_copyarea structure * * supports (atleast) S1D13506 * */ static void s1d13xxxfb_bitblt_copyarea(struct fb_info *info, const struct fb_copyarea *area) { u32 dst, src; u32 stride; u16 reverse = 0; u16 sx = area->sx, sy = area->sy; u16 dx = area->dx, dy = area->dy; u16 width = area->width, height = area->height; u16 bpp; spin_lock(&s1d13xxxfb_bitblt_lock); /* bytes per xres line */ bpp = (info->var.bits_per_pixel >> 3); stride = bpp * info->var.xres; /* reverse, calculate the last pixel in rectangle */ if ((dy > sy) || ((dy == sy) && (dx >= sx))) { dst = (((dy + height - 1) * stride) + (bpp * (dx + width - 1))); src = (((sy + height - 1) * stride) 
+ (bpp * (sx + width - 1))); reverse = 1; /* not reverse, calculate the first pixel in rectangle */ } else { /* (y * xres) + (bpp * x) */ dst = (dy * stride) + (bpp * dx); src = (sy * stride) + (bpp * sx); } /* set source address */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START0, (src & 0xff)); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START1, (src >> 8) & 0x00ff); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_SRC_START2, (src >> 16) & 0x00ff); /* set destination address */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dst & 0xff)); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, (dst >> 8) & 0x00ff); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, (dst >> 16) & 0x00ff); /* program height and width */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, (width & 0xff) - 1); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (width >> 8)); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, (height & 0xff) - 1); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (height >> 8)); /* negative direction ROP */ if (reverse == 1) { dbg_blit("(copyarea) negative rop\n"); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x03); } else /* positive direction ROP */ { s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, 0x02); dbg_blit("(copyarea) positive rop\n"); } /* set for rectangel mode and not linear */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0); /* setup the bpp 1 = 16bpp, 0 = 8bpp*/ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (bpp >> 1)); /* set words per xres */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (stride >> 1) & 0xff); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (stride >> 9)); dbg_blit("(copyarea) dx=%d, dy=%d\n", dx, dy); dbg_blit("(copyarea) sx=%d, sy=%d\n", sx, sy); dbg_blit("(copyarea) width=%d, height=%d\n", width - 1, height - 1); dbg_blit("(copyarea) stride=%d\n", stride); dbg_blit("(copyarea) bpp=%d=0x0%d, mem_offset1=%d, mem_offset2=%d\n", bpp, (bpp >> 1), (stride >> 1) & 0xff, stride >> 9); 
s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CC_EXP, 0x0c); /* initialize the engine */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80); /* wait to complete */ bltbit_wait_bitclear(info, 0x80, 8000); spin_unlock(&s1d13xxxfb_bitblt_lock); } /** * * s1d13xxxfb_bitblt_solidfill - accelerated solidfill function * @info : framebuffer structure * @rect : fb_fillrect structure * * supports (atleast 13506) * **/ static void s1d13xxxfb_bitblt_solidfill(struct fb_info *info, const struct fb_fillrect *rect) { u32 screen_stride, dest; u32 fg; u16 bpp = (info->var.bits_per_pixel >> 3); /* grab spinlock */ spin_lock(&s1d13xxxfb_bitblt_lock); /* bytes per x width */ screen_stride = (bpp * info->var.xres); /* bytes to starting point */ dest = ((rect->dy * screen_stride) + (bpp * rect->dx)); dbg_blit("(solidfill) dx=%d, dy=%d, stride=%d, dest=%d\n" "(solidfill) : rect_width=%d, rect_height=%d\n", rect->dx, rect->dy, screen_stride, dest, rect->width - 1, rect->height - 1); dbg_blit("(solidfill) : xres=%d, yres=%d, bpp=%d\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); dbg_blit("(solidfill) : rop=%d\n", rect->rop); /* We split the destination into the three registers */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START0, (dest & 0x00ff)); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START1, ((dest >> 8) & 0x00ff)); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_DST_START2, ((dest >> 16) & 0x00ff)); /* give information regarding rectangel width */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH0, ((rect->width) & 0x00ff) - 1); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_WIDTH1, (rect->width >> 8)); /* give information regarding rectangel height */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT0, ((rect->height) & 0x00ff) - 1); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_HEIGHT1, (rect->height >> 8)); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fg = ((u32 *)info->pseudo_palette)[rect->color]; dbg_blit("(solidfill) 
truecolor/directcolor\n"); dbg_blit("(solidfill) pseudo_palette[%d] = %d\n", rect->color, fg); } else { fg = rect->color; dbg_blit("(solidfill) color = %d\n", rect->color); } /* set foreground color */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC0, (fg & 0xff)); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_FGC1, (fg >> 8) & 0xff); /* set rectangual region of memory (rectangle and not linear) */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x0); /* set operation mode SOLID_FILL */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_OP, BBLT_SOLID_FILL); /* set bits per pixel (1 = 16bpp, 0 = 8bpp) */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL1, (info->var.bits_per_pixel >> 4)); /* set the memory offset for the bblt in word sizes */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF0, (screen_stride >> 1) & 0x00ff); s1d13xxxfb_writereg(info->par, S1DREG_BBLT_MEM_OFF1, (screen_stride >> 9)); /* and away we go.... */ s1d13xxxfb_writereg(info->par, S1DREG_BBLT_CTL0, 0x80); /* wait until its done */ bltbit_wait_bitclear(info, 0x80, 8000); /* let others play */ spin_unlock(&s1d13xxxfb_bitblt_lock); } /* framebuffer information structures */ static struct fb_ops s1d13xxxfb_fbops = { .owner = THIS_MODULE, .fb_set_par = s1d13xxxfb_set_par, .fb_setcolreg = s1d13xxxfb_setcolreg, .fb_blank = s1d13xxxfb_blank, .fb_pan_display = s1d13xxxfb_pan_display, /* gets replaced at chip detection time */ .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static int s1d13xxxfb_width_tab[2][4] __devinitdata = { {4, 8, 16, -1}, {9, 12, 18, -1}, }; /** * s1d13xxxfb_fetch_hw_state - Configure the framebuffer according to * hardware setup. * @info: frame buffer structure * * We setup the framebuffer structures according to the current * hardware setup. On some machines, the BIOS will have filled * the chip registers with such info, on others, these values will * have been written in some init procedure. 
In any case, the * software values needs to match the hardware ones. This is what * this function ensures. * * Note: some of the hardcoded values here might need some love to * work on various chips, and might need to no longer be hardcoded. */ static void __devinit s1d13xxxfb_fetch_hw_state(struct fb_info *info) { struct fb_var_screeninfo *var = &info->var; struct fb_fix_screeninfo *fix = &info->fix; struct s1d13xxxfb_par *par = info->par; u8 panel, display; u16 offset; u32 xres, yres; u32 xres_virtual, yres_virtual; int bpp, lcd_bpp; int is_color, is_dual, is_tft; int lcd_enabled, crt_enabled; fix->type = FB_TYPE_PACKED_PIXELS; /* general info */ par->display = s1d13xxxfb_readreg(par, S1DREG_COM_DISP_MODE); crt_enabled = (par->display & 0x02) != 0; lcd_enabled = (par->display & 0x01) != 0; if (lcd_enabled && crt_enabled) printk(KERN_WARNING PFX "Warning: LCD and CRT detected, using LCD\n"); if (lcd_enabled) display = s1d13xxxfb_readreg(par, S1DREG_LCD_DISP_MODE); else /* CRT */ display = s1d13xxxfb_readreg(par, S1DREG_CRT_DISP_MODE); bpp = display & 0x07; switch (bpp) { case 2: /* 4 bpp */ case 3: /* 8 bpp */ var->bits_per_pixel = 8; var->red.offset = var->green.offset = var->blue.offset = 0; var->red.length = var->green.length = var->blue.length = 8; break; case 5: /* 16 bpp */ s1d13xxxfb_setup_truecolour(info); break; default: dbg("bpp: %i\n", bpp); } fb_alloc_cmap(&info->cmap, 256, 0); /* LCD info */ panel = s1d13xxxfb_readreg(par, S1DREG_PANEL_TYPE); is_color = (panel & 0x04) != 0; is_dual = (panel & 0x02) != 0; is_tft = (panel & 0x01) != 0; lcd_bpp = s1d13xxxfb_width_tab[is_tft][(panel >> 4) & 3]; if (lcd_enabled) { xres = (s1d13xxxfb_readreg(par, S1DREG_LCD_DISP_HWIDTH) + 1) * 8; yres = (s1d13xxxfb_readreg(par, S1DREG_LCD_DISP_VHEIGHT0) + ((s1d13xxxfb_readreg(par, S1DREG_LCD_DISP_VHEIGHT1) & 0x03) << 8) + 1); offset = (s1d13xxxfb_readreg(par, S1DREG_LCD_MEM_OFF0) + ((s1d13xxxfb_readreg(par, S1DREG_LCD_MEM_OFF1) & 0x7) << 8)); } else { /* crt */ xres = 
(s1d13xxxfb_readreg(par, S1DREG_CRT_DISP_HWIDTH) + 1) * 8; yres = (s1d13xxxfb_readreg(par, S1DREG_CRT_DISP_VHEIGHT0) + ((s1d13xxxfb_readreg(par, S1DREG_CRT_DISP_VHEIGHT1) & 0x03) << 8) + 1); offset = (s1d13xxxfb_readreg(par, S1DREG_CRT_MEM_OFF0) + ((s1d13xxxfb_readreg(par, S1DREG_CRT_MEM_OFF1) & 0x7) << 8)); } xres_virtual = offset * 16 / var->bits_per_pixel; yres_virtual = fix->smem_len / (offset * 2); var->xres = xres; var->yres = yres; var->xres_virtual = xres_virtual; var->yres_virtual = yres_virtual; var->xoffset = var->yoffset = 0; fix->line_length = offset * 2; var->grayscale = !is_color; var->activate = FB_ACTIVATE_NOW; dbg(PFX "bpp=%d, lcd_bpp=%d, " "crt_enabled=%d, lcd_enabled=%d\n", var->bits_per_pixel, lcd_bpp, crt_enabled, lcd_enabled); dbg(PFX "xres=%d, yres=%d, vxres=%d, vyres=%d " "is_color=%d, is_dual=%d, is_tft=%d\n", xres, yres, xres_virtual, yres_virtual, is_color, is_dual, is_tft); } static int s1d13xxxfb_remove(struct platform_device *pdev) { struct fb_info *info = platform_get_drvdata(pdev); struct s1d13xxxfb_par *par = NULL; if (info) { par = info->par; if (par && par->regs) { /* disable output & enable powersave */ s1d13xxxfb_writereg(par, S1DREG_COM_DISP_MODE, 0x00); s1d13xxxfb_writereg(par, S1DREG_PS_CNF, 0x11); iounmap(par->regs); } fb_dealloc_cmap(&info->cmap); if (info->screen_base) iounmap(info->screen_base); framebuffer_release(info); } release_mem_region(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start +1); release_mem_region(pdev->resource[1].start, pdev->resource[1].end - pdev->resource[1].start +1); return 0; } static int __devinit s1d13xxxfb_probe(struct platform_device *pdev) { struct s1d13xxxfb_par *default_par; struct fb_info *info; struct s1d13xxxfb_pdata *pdata = NULL; int ret = 0; int i; u8 revision, prod_id; dbg("probe called: device is %p\n", pdev); printk(KERN_INFO "Epson S1D13XXX FB Driver\n"); /* enable platform-dependent hardware glue, if any */ if (pdev->dev.platform_data) pdata = 
pdev->dev.platform_data; if (pdata && pdata->platform_init_video) pdata->platform_init_video(); if (pdev->num_resources != 2) { dev_err(&pdev->dev, "invalid num_resources: %i\n", pdev->num_resources); ret = -ENODEV; goto bail; } /* resource[0] is VRAM, resource[1] is registers */ if (pdev->resource[0].flags != IORESOURCE_MEM || pdev->resource[1].flags != IORESOURCE_MEM) { dev_err(&pdev->dev, "invalid resource type\n"); ret = -ENODEV; goto bail; } if (!request_mem_region(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start +1, "s1d13xxxfb mem")) { dev_dbg(&pdev->dev, "request_mem_region failed\n"); ret = -EBUSY; goto bail; } if (!request_mem_region(pdev->resource[1].start, pdev->resource[1].end - pdev->resource[1].start +1, "s1d13xxxfb regs")) { dev_dbg(&pdev->dev, "request_mem_region failed\n"); ret = -EBUSY; goto bail; } info = framebuffer_alloc(sizeof(struct s1d13xxxfb_par) + sizeof(u32) * 256, &pdev->dev); if (!info) { ret = -ENOMEM; goto bail; } platform_set_drvdata(pdev, info); default_par = info->par; default_par->regs = ioremap_nocache(pdev->resource[1].start, pdev->resource[1].end - pdev->resource[1].start +1); if (!default_par->regs) { printk(KERN_ERR PFX "unable to map registers\n"); ret = -ENOMEM; goto bail; } info->pseudo_palette = default_par->pseudo_palette; info->screen_base = ioremap_nocache(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start +1); if (!info->screen_base) { printk(KERN_ERR PFX "unable to map framebuffer\n"); ret = -ENOMEM; goto bail; } /* production id is top 6 bits */ prod_id = s1d13xxxfb_readreg(default_par, S1DREG_REV_CODE) >> 2; /* revision id is lower 2 bits */ revision = s1d13xxxfb_readreg(default_par, S1DREG_REV_CODE) & 0x3; ret = -ENODEV; for (i = 0; i < ARRAY_SIZE(s1d13xxxfb_prod_ids); i++) { if (prod_id == s1d13xxxfb_prod_ids[i]) { /* looks like we got it in our list */ default_par->prod_id = prod_id; default_par->revision = revision; ret = 0; break; } } if (!ret) { printk(KERN_INFO 
PFX "chip production id %i = %s\n", prod_id, s1d13xxxfb_prod_names[i]); printk(KERN_INFO PFX "chip revision %i\n", revision); } else { printk(KERN_INFO PFX "unknown chip production id %i, revision %i\n", prod_id, revision); printk(KERN_INFO PFX "please contant maintainer\n"); goto bail; } info->fix = s1d13xxxfb_fix; info->fix.mmio_start = pdev->resource[1].start; info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1; info->fix.smem_start = pdev->resource[0].start; info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1; printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n", default_par->regs, info->fix.smem_len / 1024, info->screen_base); info->par = default_par; info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; info->fbops = &s1d13xxxfb_fbops; switch(prod_id) { case S1D13506_PROD_ID: /* activate acceleration */ s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill; s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea; info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA; break; default: break; } /* perform "manual" chip initialization, if needed */ if (pdata && pdata->initregs) s1d13xxxfb_runinit(info->par, pdata->initregs, pdata->initregssize); s1d13xxxfb_fetch_hw_state(info); if (register_framebuffer(info) < 0) { ret = -EINVAL; goto bail; } printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); return 0; bail: s1d13xxxfb_remove(pdev); return ret; } #ifdef CONFIG_PM static int s1d13xxxfb_suspend(struct platform_device *dev, pm_message_t state) { struct fb_info *info = platform_get_drvdata(dev); struct s1d13xxxfb_par *s1dfb = info->par; struct s1d13xxxfb_pdata *pdata = NULL; /* disable display */ lcd_enable(s1dfb, 0); crt_enable(s1dfb, 0); if (dev->dev.platform_data) pdata = dev->dev.platform_data; #if 0 if (!s1dfb->disp_save) s1dfb->disp_save = kmalloc(info->fix.smem_len, GFP_KERNEL); if (!s1dfb->disp_save) { printk(KERN_ERR 
PFX "no memory to save screen"); return -ENOMEM; } memcpy_fromio(s1dfb->disp_save, info->screen_base, info->fix.smem_len); #else s1dfb->disp_save = NULL; #endif if (!s1dfb->regs_save) s1dfb->regs_save = kmalloc(info->fix.mmio_len, GFP_KERNEL); if (!s1dfb->regs_save) { printk(KERN_ERR PFX "no memory to save registers"); return -ENOMEM; } /* backup all registers */ memcpy_fromio(s1dfb->regs_save, s1dfb->regs, info->fix.mmio_len); /* now activate power save mode */ s1d13xxxfb_writereg(s1dfb, S1DREG_PS_CNF, 0x11); if (pdata && pdata->platform_suspend_video) return pdata->platform_suspend_video(); else return 0; } static int s1d13xxxfb_resume(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); struct s1d13xxxfb_par *s1dfb = info->par; struct s1d13xxxfb_pdata *pdata = NULL; /* awaken the chip */ s1d13xxxfb_writereg(s1dfb, S1DREG_PS_CNF, 0x10); /* do not let go until SDRAM "wakes up" */ while ((s1d13xxxfb_readreg(s1dfb, S1DREG_PS_STATUS) & 0x01)) udelay(10); if (dev->dev.platform_data) pdata = dev->dev.platform_data; if (s1dfb->regs_save) { /* will write RO regs, *should* get away with it :) */ memcpy_toio(s1dfb->regs, s1dfb->regs_save, info->fix.mmio_len); kfree(s1dfb->regs_save); } if (s1dfb->disp_save) { memcpy_toio(info->screen_base, s1dfb->disp_save, info->fix.smem_len); kfree(s1dfb->disp_save); /* XXX kmalloc()'d when? 
*/ } if ((s1dfb->display & 0x01) != 0) lcd_enable(s1dfb, 1); if ((s1dfb->display & 0x02) != 0) crt_enable(s1dfb, 1); if (pdata && pdata->platform_resume_video) return pdata->platform_resume_video(); else return 0; } #endif /* CONFIG_PM */ static struct platform_driver s1d13xxxfb_driver = { .probe = s1d13xxxfb_probe, .remove = s1d13xxxfb_remove, #ifdef CONFIG_PM .suspend = s1d13xxxfb_suspend, .resume = s1d13xxxfb_resume, #endif .driver = { .name = S1D_DEVICENAME, }, }; static int __init s1d13xxxfb_init(void) { #ifndef MODULE if (fb_get_options("s1d13xxxfb", NULL)) return -ENODEV; #endif return platform_driver_register(&s1d13xxxfb_driver); } static void __exit s1d13xxxfb_exit(void) { platform_driver_unregister(&s1d13xxxfb_driver); } module_init(s1d13xxxfb_init); module_exit(s1d13xxxfb_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Framebuffer driver for S1D13xxx devices"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, Thibaut VARENE <varenet@parisc-linux.org>");
gpl-2.0
Trinityhaxxor/platform_kernel_msm8x60_stock
arch/mips/ar7/memory.c
8795
1865
/*
 * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/swap.h>

#include <asm/bootinfo.h>
#include <asm/page.h>
#include <asm/sections.h>

#include <asm/mach-ar7/ar7.h>
#include <asm/mips-boards/prom.h>

/*
 * memsize() - probe the amount of SDRAM fitted on an AR7 board.
 *
 * The probe writes each candidate word's own (KSEG1) address into it and
 * reads it back: a location that does not hold its tag is either unbacked
 * or an alias of lower memory, and marks the end of real RAM.
 *
 * Returns the detected memory size in bytes (at most 64 MiB).
 */
static int __init memsize(void)
{
	u32 size = (64 << 20);	/* start from the 64 MiB maximum */
	u32 *addr = (u32 *)KSEG1ADDR(AR7_SDRAM_BASE + size - 4);
	u32 *kernel_end = (u32 *)KSEG1ADDR(CPHYSADDR((u32)&_end));
	u32 *tmpaddr = addr;

	/* Walk downwards, tagging one word per power-of-two step, but never
	 * below the end of the kernel image (those words are live code/data). */
	while (tmpaddr > kernel_end) {
		*tmpaddr = (u32)tmpaddr;
		size >>= 1;
		tmpaddr -= size >> 2;	/* size is bytes, tmpaddr is u32 * */
	}

	/* Walk back upwards; the first tag that did not survive the write
	 * marks the memory boundary, and size doubles for each good step. */
	do {
		tmpaddr += size >> 2;
		if (*tmpaddr != (u32)tmpaddr)
			break;
		size <<= 1;
	} while (size < (64 << 20));

	/* NOTE(review): this writes through &addr, i.e. into the stack slot
	 * holding the pointer, not through the probed location addr itself.
	 * Looks suspicious but is preserved as-is - confirm the intent
	 * against the upstream AR7 platform code before changing it. */
	writel((u32)tmpaddr, &addr);

	return size;
}

/* Register the probed RAM with the MIPS boot-memory map. */
void __init prom_meminit(void)
{
	unsigned long pages;

	pages = memsize() >> PAGE_SHIFT;
	add_memory_region(PHYS_OFFSET, pages << PAGE_SHIFT, BOOT_MEM_RAM);
}

/* The AR7 bootloader leaves nothing for the kernel to reclaim. */
void __init prom_free_prom_memory(void)
{
	/* Nothing to free */
}
gpl-2.0
jiangliu/linux
arch/frv/kernel/time.c
10331
3146
/* time.c: FRV arch-specific time handling
 *
 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from arch/m68k/kernel/time.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/irq.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/timer-regs.h>
#include <asm/mb-regs.h>
#include <asm/mb86943a.h>

#include <linux/timex.h>

/* Tick period in microseconds, derived from tick_nsec. */
#define TICK_SIZE (tick_nsec / 1000)

/* Clock rates discovered during boot.  __nongprelbss keeps these out of
 * the GP-relative small-data section. */
unsigned long __nongprelbss __clkin_clock_speed_HZ;
unsigned long __nongprelbss __ext_bus_clock_speed_HZ;
unsigned long __nongprelbss __res_bus_clock_speed_HZ;
unsigned long __nongprelbss __sdram_clock_speed_HZ;
unsigned long __nongprelbss __core_bus_clock_speed_HZ;
unsigned long __nongprelbss __core_clock_speed_HZ;
unsigned long __nongprelbss __dsu_clock_speed_HZ;
unsigned long __nongprelbss __serial_clock_speed_HZ;

unsigned long __delay_loops_MHz;

static irqreturn_t timer_interrupt(int irq, void *dummy);

/* Scheduling-timer IRQ descriptor, hooked up in time_init(). */
static struct irqaction timer_irq = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED,
	.name = "timer",
};

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
	profile_tick(CPU_PROFILING);

	/* Account exactly one tick against the wall clock. */
	xtime_update(1);

#ifdef CONFIG_HEARTBEAT
	/* Drive the board LEDs from a free-running tick counter. */
	static unsigned short n;
	n++;
	__set_LEDS(n);
#endif /* CONFIG_HEARTBEAT */

	update_process_times(user_mode(get_irq_regs()));

	return IRQ_HANDLED;
}

/*
 * time_divisor_init() - program hardware timer 0 to fire at HZ.
 *
 * The divisor chain is: resource-bus clock / pre / (1 << prediv) / base.
 * The TCSR count register is 8 bits wide per write, so the 16-bit base
 * value is loaded low byte first, then high byte - the order matters.
 */
void time_divisor_init(void)
{
	unsigned short base, pre, prediv;

	/* set the scheduling timer going */
	pre = 1;
	prediv = 4;
	base = __res_bus_clock_speed_HZ / pre / HZ / (1 << prediv);

	__set_TPRV(pre);
	__set_TxCKSL_DATA(0, prediv);
	__set_TCTR(TCTR_SC_CTR0 | TCTR_RL_RW_LH8 | TCTR_MODE_2);
	__set_TCSR_DATA(0, base & 0xff);	/* low byte first ... */
	__set_TCSR_DATA(0, base >> 8);		/* ... then high byte */
}

/*
 * read_persistent_clock() - fetch boot time from the time-of-day hardware.
 * @ts: filled in with the persistent clock value (nanoseconds forced to 0)
 */
void read_persistent_clock(struct timespec *ts)
{
	unsigned int year, mon, day, hour, min, sec;
	extern void arch_gettod(int *year, int *mon, int *day, int *hour, int *min, int *sec);

	/* FIX by dqg : Set to zero for platforms that don't have tod */
	/* without this time is undefined and can overflow time_t, causing  */
	/* very strange errors */
	year = 1980;
	mon = day = 1;
	hour = min = sec = 0;
	arch_gettod(&year, &mon, &day, &hour, &min, &sec);

	/* Two-digit TOD years: map anything before 1970 into 2000-2069. */
	if ((year += 1900) < 1970)
		year += 100;
	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
	ts->tv_nsec = 0;
}

/* Install the scheduling-timer interrupt and start the divisor chain. */
void time_init(void)
{
	/* install scheduling interrupt handler */
	setup_irq(IRQ_CPU_TIMER0, &timer_irq);

	time_divisor_init();
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	/* Jiffies-resolution only: advances in (1000000000 / HZ) ns steps. */
	return jiffies_64 * (1000000000 / HZ);
}
gpl-2.0
mozzwald/u-boot-pxa-zipitz2
board/eukrea/cpu9260/led.c
92
3778
/*
 * Copyright (c) 2009 Wind River Systems, Inc.
 * Tom Rix <Tom.Rix@windriver.com>
 * (C) Copyright 2009
 * Eric Benard <eric@eukrea.com>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <status_led.h>
#include <asm/arch/at91sam9260.h>
#include <asm/arch/at91_pmc.h>
#include <asm/arch/gpio.h>
#include <asm/arch/io.h>

/* Last state programmed for each LED, indexed by its STATUS_LED_* id. */
static unsigned int saved_state[4] = {STATUS_LED_OFF, STATUS_LED_OFF,
				      STATUS_LED_OFF, STATUS_LED_OFF};

/*
 * drive_led() - program one LED line and record the new software state.
 * @gpio: PIO line wired to the LED (one of the CONFIG_*_LED values)
 * @led:  STATUS_LED_* id, used as the saved_state[] index
 * @lit:  non-zero to light the LED
 *
 * The LEDs are wired active-low: driving the line to 0 turns one on.
 */
static void drive_led(int gpio, int led, int lit)
{
	at91_set_gpio_value(gpio, lit ? 0 : 1);
	saved_state[led] = lit ? STATUS_LED_ON : STATUS_LED_OFF;
}

/* Enable the PIOC clock, make all four LED lines outputs and drive them
 * high, i.e. every LED starts out dark (matching saved_state above). */
void coloured_LED_init(void)
{
	/* Enable clock */
	at91_sys_write(AT91_PMC_PCER, 1 << AT91SAM9260_ID_PIOC);

	at91_set_gpio_output(CONFIG_RED_LED, 1);
	at91_set_gpio_output(CONFIG_GREEN_LED, 1);
	at91_set_gpio_output(CONFIG_YELLOW_LED, 1);
	at91_set_gpio_output(CONFIG_BLUE_LED, 1);

	at91_set_gpio_value(CONFIG_RED_LED, 1);
	at91_set_gpio_value(CONFIG_GREEN_LED, 1);
	at91_set_gpio_value(CONFIG_YELLOW_LED, 1);
	at91_set_gpio_value(CONFIG_BLUE_LED, 1);
}

void red_LED_off(void)
{
	drive_led(CONFIG_RED_LED, STATUS_LED_RED, 0);
}

void green_LED_off(void)
{
	drive_led(CONFIG_GREEN_LED, STATUS_LED_GREEN, 0);
}

void yellow_LED_off(void)
{
	drive_led(CONFIG_YELLOW_LED, STATUS_LED_YELLOW, 0);
}

void blue_LED_off(void)
{
	drive_led(CONFIG_BLUE_LED, STATUS_LED_BLUE, 0);
}

void red_LED_on(void)
{
	drive_led(CONFIG_RED_LED, STATUS_LED_RED, 1);
}

void green_LED_on(void)
{
	drive_led(CONFIG_GREEN_LED, STATUS_LED_GREEN, 1);
}

void yellow_LED_on(void)
{
	drive_led(CONFIG_YELLOW_LED, STATUS_LED_YELLOW, 1);
}

void blue_LED_on(void)
{
	drive_led(CONFIG_BLUE_LED, STATUS_LED_BLUE, 1);
}

/* status_led hook: initialising an LED is just setting its first state. */
void __led_init(led_id_t mask, int state)
{
	__led_set(mask, state);
}

/* status_led hook: flip one LED; ids we don't own are ignored. */
void __led_toggle(led_id_t mask)
{
	if (mask != STATUS_LED_BLUE && mask != STATUS_LED_RED &&
	    mask != STATUS_LED_GREEN && mask != STATUS_LED_YELLOW)
		return;

	__led_set(mask, (STATUS_LED_ON == saved_state[mask]) ?
			STATUS_LED_OFF : STATUS_LED_ON);
}

/* status_led hook: force one LED to an explicit on/off state. */
void __led_set(led_id_t mask, int state)
{
	int lit = (STATUS_LED_ON == state);

	if (mask == STATUS_LED_BLUE)
		lit ? blue_LED_on() : blue_LED_off();
	else if (mask == STATUS_LED_RED)
		lit ? red_LED_on() : red_LED_off();
	else if (mask == STATUS_LED_GREEN)
		lit ? green_LED_on() : green_LED_off();
	else if (mask == STATUS_LED_YELLOW)
		lit ? yellow_LED_on() : yellow_LED_off();
}
gpl-2.0
ffosilva/android_kernel_sony_msm8974
drivers/media/radio/radio-iris-transport.c
92
5109
/* * Qualcomm's FM Shared Memory Transport Driver * * FM HCI_SMD ( FM HCI Shared Memory Driver) is Qualcomm's Shared memory driver * for the HCI protocol. This file is based on drivers/bluetooth/hci_vhci.c * * Copyright (c) 2000-2001, 2011-2012 The Linux Foundation. All rights reserved. * * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com> * Copyright (C) 2004-2006 Marcel Holtmann <marcel@holtmann.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/workqueue.h> #include <mach/msm_smd.h> #include <media/radio-iris.h> struct radio_data { struct radio_hci_dev *hdev; struct tasklet_struct rx_task; struct smd_channel *fm_channel; }; struct radio_data hs; static struct work_struct *reset_worker; static void radio_hci_smd_destruct(struct radio_hci_dev *hdev) { radio_hci_unregister_dev(hs.hdev); } static void radio_hci_smd_recv_event(unsigned long temp) { int len; int rc; struct sk_buff *skb; unsigned char *buf; struct radio_data *hsmd = &hs; len = smd_read_avail(hsmd->fm_channel); while (len) { skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { FMDERR("Memory not allocated for the socket"); return; } buf = kmalloc(len, GFP_ATOMIC); if (!buf) { kfree_skb(skb); FMDERR("Error in allocating buffer memory"); return; } rc = smd_read(hsmd->fm_channel, (void *)buf, len); memcpy(skb_put(skb, len), buf, len); skb_orphan(skb); skb->dev = (struct net_device *)hs.hdev; rc = radio_hci_recv_frame(skb); kfree(buf); len = 
smd_read_avail(hsmd->fm_channel); } } static int radio_hci_smd_send_frame(struct sk_buff *skb) { int len = 0; len = smd_write(hs.fm_channel, skb->data, skb->len); if (len < skb->len) { FMDERR("Failed to write Data %d", len); kfree_skb(skb); return -ENODEV; } kfree_skb(skb); return 0; } static void send_disable_event(struct work_struct *worker) { struct sk_buff *skb; unsigned char buf[6] = { 0x0f, 0x04, 0x01, 0x02, 0x4c, 0x00 }; int len = sizeof(buf); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { FMDERR("Memory not allocated for the socket"); kfree(worker); return; } FMDERR("FM INSERT DISABLE Rx Event"); memcpy(skb_put(skb, len), buf, len); skb_orphan(skb); skb->dev = (struct net_device *)hs.hdev; radio_hci_recv_frame(skb); kfree(worker); } static void radio_hci_smd_notify_cmd(void *data, unsigned int event) { struct radio_hci_dev *hdev = hs.hdev; if (!hdev) { FMDERR("Frame for unknown HCI device (hdev=NULL)"); return; } switch (event) { case SMD_EVENT_DATA: tasklet_schedule(&hs.rx_task); break; case SMD_EVENT_OPEN: break; case SMD_EVENT_CLOSE: reset_worker = kzalloc(sizeof(*reset_worker), GFP_ATOMIC); if (!reset_worker) { FMDERR("Out of memory"); break; } INIT_WORK(reset_worker, send_disable_event); schedule_work(reset_worker); break; default: break; } } static int radio_hci_smd_register_dev(struct radio_data *hsmd) { struct radio_hci_dev *hdev; int rc; if (hsmd == NULL) return -ENODEV; hdev = kmalloc(sizeof(struct radio_hci_dev), GFP_KERNEL); if (hdev == NULL) return -ENODEV; hsmd->hdev = hdev; tasklet_init(&hsmd->rx_task, radio_hci_smd_recv_event, (unsigned long) hsmd); hdev->send = radio_hci_smd_send_frame; hdev->destruct = radio_hci_smd_destruct; /* Open the SMD Channel and device and register the callback function */ rc = smd_named_open_on_edge("APPS_FM", SMD_APPS_WCNSS, &hsmd->fm_channel, hdev, radio_hci_smd_notify_cmd); if (rc < 0) { FMDERR("Cannot open the command channel"); hsmd->hdev = NULL; kfree(hdev); return -ENODEV; } 
smd_disable_read_intr(hsmd->fm_channel); if (radio_hci_register_dev(hdev) < 0) { FMDERR("Can't register HCI device"); smd_close(hsmd->fm_channel); hsmd->hdev = NULL; kfree(hdev); return -ENODEV; } return 0; } static void radio_hci_smd_deregister(void) { smd_close(hs.fm_channel); hs.fm_channel = 0; } #ifdef MODULE static int __init radio_hci_smd_init(void) { return radio_hci_smd_register_dev(&hs); } module_init(radio_hci_smd_init); static void __exit radio_hci_smd_exit(void) { radio_hci_smd_deregister(); } module_exit(radio_hci_smd_exit); int hci_fm_smd_register(void) { return 0; } void hci_fm_smd_deregister(void) { } #else int hci_fm_smd_register(void) { return radio_hci_smd_register_dev(&hs); } void hci_fm_smd_deregister(void) { radio_hci_smd_deregister(); } #endif MODULE_DESCRIPTION("Bluetooth SMD driver"); MODULE_AUTHOR("Ankur Nandwani <ankurn@codeaurora.org>"); MODULE_LICENSE("GPL v2");
gpl-2.0
omega-roms/omega_kernel_N9005
drivers/of/platform.c
348
13102
/* * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * and Arnd Bergmann, IBM Corp. * Merged from powerpc/kernel/of_platform.c and * sparc{,64}/kernel/of_device.c by Stephen Rothwell * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/errno.h> #include <linux/module.h> #include <linux/amba/bus.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> const struct of_device_id of_default_bus_match_table[] = { { .compatible = "simple-bus", }, #ifdef CONFIG_ARM_AMBA { .compatible = "arm,amba-bus", }, #endif /* CONFIG_ARM_AMBA */ {} /* Empty terminated list */ }; static int of_dev_node_match(struct device *dev, void *data) { return dev->of_node == data; } /** * of_find_device_by_node - Find the platform_device associated with a node * @np: Pointer to device tree node * * Returns platform_device pointer, or NULL if not found */ struct platform_device *of_find_device_by_node(struct device_node *np) { struct device *dev; dev = bus_find_device(&platform_bus_type, NULL, np, of_dev_node_match); return dev ? to_platform_device(dev) : NULL; } EXPORT_SYMBOL(of_find_device_by_node); #if defined(CONFIG_PPC_DCR) #include <asm/dcr.h> #endif #ifdef CONFIG_OF_ADDRESS /* * The following routines scan a subtree and registers a device for * each applicable node. * * Note: sparc doesn't use these routines because it has a different * mechanism for creating devices from device tree nodes. 
*/ /** * of_device_make_bus_id - Use the device node data to assign a unique name * @dev: pointer to device structure that is linked to a device tree node * * This routine will first try using either the dcr-reg or the reg property * value to derive a unique name. As a last resort it will use the node * name followed by a unique number. */ void of_device_make_bus_id(struct device *dev) { static atomic_t bus_no_reg_magic; struct device_node *node = dev->of_node; const u32 *reg; u64 addr; const __be32 *addrp; int magic; #ifdef CONFIG_PPC_DCR /* * If it's a DCR based device, use 'd' for native DCRs * and 'D' for MMIO DCRs. */ reg = of_get_property(node, "dcr-reg", NULL); if (reg) { #ifdef CONFIG_PPC_DCR_NATIVE dev_set_name(dev, "d%x.%s", *reg, node->name); #else /* CONFIG_PPC_DCR_NATIVE */ u64 addr = of_translate_dcr_address(node, *reg, NULL); if (addr != OF_BAD_ADDR) { dev_set_name(dev, "D%llx.%s", (unsigned long long)addr, node->name); return; } #endif /* !CONFIG_PPC_DCR_NATIVE */ } #endif /* CONFIG_PPC_DCR */ /* * For MMIO, get the physical address */ reg = of_get_property(node, "reg", NULL); if (reg) { if (of_can_translate_address(node)) { addr = of_translate_address(node, reg); } else { addrp = of_get_address(node, 0, NULL, NULL); if (addrp) addr = of_read_number(addrp, 1); else addr = OF_BAD_ADDR; } if (addr != OF_BAD_ADDR) { dev_set_name(dev, "%llx.%s", (unsigned long long)addr, node->name); return; } } /* * No BusID, use the node name and add a globally incremented * counter (and pray...) */ magic = atomic_add_return(1, &bus_no_reg_magic); dev_set_name(dev, "%s.%d", node->name, magic - 1); } /** * of_device_alloc - Allocate and initialize an of_device * @np: device node to assign to device * @bus_id: Name to assign to the device. May be null to use default name. * @parent: Parent device. 
*/ struct platform_device *of_device_alloc(struct device_node *np, const char *bus_id, struct device *parent) { struct platform_device *dev; int rc, i, num_reg = 0, num_irq; struct resource *res, temp_res; dev = platform_device_alloc("", -1); if (!dev) return NULL; /* count the io and irq resources */ if (of_can_translate_address(np)) while (of_address_to_resource(np, num_reg, &temp_res) == 0) num_reg++; num_irq = of_irq_count(np); /* Populate the resource table */ if (num_irq || num_reg) { res = kzalloc(sizeof(*res) * (num_irq + num_reg), GFP_KERNEL); if (!res) { platform_device_put(dev); return NULL; } dev->num_resources = num_reg + num_irq; dev->resource = res; for (i = 0; i < num_reg; i++, res++) { rc = of_address_to_resource(np, i, res); WARN_ON(rc); } WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq); } dev->dev.of_node = of_node_get(np); #if defined(CONFIG_MICROBLAZE) dev->dev.dma_mask = &dev->archdata.dma_mask; #endif dev->dev.parent = parent; if (bus_id) dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(&dev->dev); return dev; } EXPORT_SYMBOL(of_device_alloc); /** * of_platform_device_create_pdata - Alloc, initialize and register an of_device * @np: pointer to node to create device for * @bus_id: name to assign device * @platform_data: pointer to populate platform_data pointer with * @parent: Linux device model parent device. * * Returns pointer to created platform device, or NULL if a device was not * registered. Unavailable devices will not get registered. 
*/ struct platform_device *of_platform_device_create_pdata( struct device_node *np, const char *bus_id, void *platform_data, struct device *parent) { struct platform_device *dev; if (!of_device_is_available(np)) return NULL; dev = of_device_alloc(np, bus_id, parent); if (!dev) return NULL; #if defined(CONFIG_MICROBLAZE) dev->archdata.dma_mask = 0xffffffffUL; #endif dev->dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8); dev->dev.bus = &platform_bus_type; dev->dev.platform_data = platform_data; /* We do not fill the DMA ops for platform devices by default. * This is currently the responsibility of the platform code * to do such, possibly using a device notifier */ if (of_device_add(dev) != 0) { platform_device_put(dev); return NULL; } return dev; } /** * of_platform_device_create - Alloc, initialize and register an of_device * @np: pointer to node to create device for * @bus_id: name to assign device * @parent: Linux device model parent device. * * Returns pointer to created platform device, or NULL if a device was not * registered. Unavailable devices will not get registered. 
*/ struct platform_device *of_platform_device_create(struct device_node *np, const char *bus_id, struct device *parent) { return of_platform_device_create_pdata(np, bus_id, NULL, parent); } EXPORT_SYMBOL(of_platform_device_create); #ifdef CONFIG_ARM_AMBA static struct amba_device *of_amba_device_create(struct device_node *node, const char *bus_id, void *platform_data, struct device *parent) { struct amba_device *dev; const void *prop; int i, ret; pr_debug("Creating amba device %s\n", node->full_name); if (!of_device_is_available(node)) return NULL; dev = amba_device_alloc(NULL, 0, 0); if (!dev) return NULL; /* setup generic device info */ dev->dev.coherent_dma_mask = ~0; dev->dev.of_node = of_node_get(node); dev->dev.parent = parent; dev->dev.platform_data = platform_data; if (bus_id) dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(&dev->dev); /* setup amba-specific device info */ dev->dma_mask = ~0; /* Allow the HW Peripheral ID to be overridden */ prop = of_get_property(node, "arm,primecell-periphid", NULL); if (prop) dev->periphid = of_read_ulong(prop, 1); /* Decode the IRQs and address ranges */ for (i = 0; i < AMBA_NR_IRQS; i++) dev->irq[i] = irq_of_parse_and_map(node, i); ret = of_address_to_resource(node, 0, &dev->res); if (ret) goto err_free; ret = amba_device_add(dev, &iomem_resource); if (ret) goto err_free; return dev; err_free: amba_device_put(dev); return NULL; } #else /* CONFIG_ARM_AMBA */ static struct amba_device *of_amba_device_create(struct device_node *node, const char *bus_id, void *platform_data, struct device *parent) { return NULL; } #endif /* CONFIG_ARM_AMBA */ /** * of_devname_lookup() - Given a device node, lookup the preferred Linux name */ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup, struct device_node *np) { struct resource res; if (!lookup) return NULL; for(; lookup->compatible != NULL; lookup++) { if (!of_device_is_compatible(np, lookup->compatible)) continue; if 
(of_address_to_resource(np, 0, &res)) continue; if (res.start != lookup->phys_addr) continue; pr_debug("%s: devname=%s\n", np->full_name, lookup->name); return lookup; } return NULL; } /** * of_platform_bus_create() - Create a device for a node and its children. * @bus: device node of the bus to instantiate * @matches: match table for bus nodes * @lookup: auxdata table for matching id and platform_data with device nodes * @parent: parent for new device, or NULL for top level. * @strict: require compatible property * * Creates a platform_device for the provided device_node, and optionally * recursively create devices for all the child nodes. */ static int of_platform_bus_create(struct device_node *bus, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, struct device *parent, bool strict) { const struct of_dev_auxdata *auxdata; struct device_node *child; struct platform_device *dev; const char *bus_id = NULL; void *platform_data = NULL; int rc = 0; /* Make sure it has a compatible property */ if (strict && (!of_get_property(bus, "compatible", NULL))) { pr_debug("%s() - skipping %s, no compatible prop\n", __func__, bus->full_name); return 0; } auxdata = of_dev_lookup(lookup, bus); if (auxdata) { bus_id = auxdata->name; platform_data = auxdata->platform_data; } if (of_device_is_compatible(bus, "arm,primecell")) { of_amba_device_create(bus, bus_id, platform_data, parent); return 0; } dev = of_platform_device_create_pdata(bus, bus_id, platform_data, parent); if (!dev || !of_match_node(matches, bus)) return 0; for_each_child_of_node(bus, child) { pr_debug(" create child: %s\n", child->full_name); rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict); if (rc) { of_node_put(child); break; } } return rc; } /** * of_platform_bus_probe() - Probe the device-tree for platform buses * @root: parent of the first level to probe or NULL for the root of the tree * @matches: match table for bus nodes * @parent: parent to hook devices from, NULL 
for toplevel * * Note that children of the provided root are not instantiated as devices * unless the specified root itself matches the bus list and is not NULL. */ int of_platform_bus_probe(struct device_node *root, const struct of_device_id *matches, struct device *parent) { struct device_node *child; int rc = 0; root = root ? of_node_get(root) : of_find_node_by_path("/"); if (!root) return -EINVAL; pr_debug("of_platform_bus_probe()\n"); pr_debug(" starting at: %s\n", root->full_name); /* Do a self check of bus type, if there's a match, create children */ if (of_match_node(matches, root)) { rc = of_platform_bus_create(root, matches, NULL, parent, false); } else for_each_child_of_node(root, child) { if (!of_match_node(matches, child)) continue; rc = of_platform_bus_create(child, matches, NULL, parent, false); if (rc) break; } of_node_put(root); return rc; } EXPORT_SYMBOL(of_platform_bus_probe); /** * of_platform_populate() - Populate platform_devices from device tree data * @root: parent of the first level to probe or NULL for the root of the tree * @matches: match table, NULL to use the default * @parent: parent to hook devices from, NULL for toplevel * * Similar to of_platform_bus_probe(), this function walks the device tree * and creates devices from nodes. It differs in that it follows the modern * convention of requiring all device nodes to have a 'compatible' property, * and it is suitable for creating devices which are children of the root * node (of_platform_bus_probe will only create children of the root which * are selected by the @matches argument). * * New board support should be using this function instead of * of_platform_bus_probe(). * * Returns 0 on success, < 0 on failure. */ int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, struct device *parent) { struct device_node *child; int rc = 0; root = root ? 
of_node_get(root) : of_find_node_by_path("/"); if (!root) return -EINVAL; for_each_child_of_node(root, child) { rc = of_platform_bus_create(child, matches, lookup, parent, true); if (rc) break; } of_node_put(root); return rc; } #endif /* CONFIG_OF_ADDRESS */
gpl-2.0
TEAM-Gummy/android_kernel_samsung_hlte
drivers/media/platform/msm/camera_h/msm_vb2/msm_vb2.c
604
6963
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "msm_vb2.h" static int msm_vb2_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { int i; struct msm_v4l2_format_data *data = q->drv_priv; if (data->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { if (WARN_ON(data->num_planes > VIDEO_MAX_PLANES)) return -EINVAL; *num_planes = data->num_planes; for (i = 0; i < data->num_planes; i++) sizes[i] = data->plane_sizes[i]; } else { pr_err("%s: Unsupported buf type :%d\n", __func__, data->type); return -EINVAL; } return 0; } int msm_vb2_buf_init(struct vb2_buffer *vb) { struct msm_stream *stream; struct msm_vb2_buffer *msm_vb2_buf; stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s: Couldn't find stream\n", __func__); return -EINVAL; } msm_vb2_buf = container_of(vb, struct msm_vb2_buffer, vb2_buf); msm_vb2_buf->in_freeq = 0; return 0; } static void msm_vb2_buf_queue(struct vb2_buffer *vb) { struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; unsigned long flags; msm_vb2 = container_of(vb, struct msm_vb2_buffer, vb2_buf); if (!msm_vb2) { pr_err("%s:%d] vb2_buf NULL", __func__, __LINE__); return; } stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s:%d] NULL stream", __func__, __LINE__); return; } spin_lock_irqsave(&stream->stream_lock, flags); list_add_tail(&msm_vb2->list, &stream->queued_list); spin_unlock_irqrestore(&stream->stream_lock, 
flags); } static int msm_vb2_buf_finish(struct vb2_buffer *vb) { struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; unsigned long flags; struct msm_vb2_buffer *msm_vb2_entry, *temp; msm_vb2 = container_of(vb, struct msm_vb2_buffer, vb2_buf); if (!msm_vb2) { pr_err("%s:%d] vb2_buf NULL", __func__, __LINE__); return -EINVAL; } stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s:%d] NULL stream", __func__, __LINE__); return -EINVAL; } spin_lock_irqsave(&stream->stream_lock, flags); list_for_each_entry_safe(msm_vb2_entry, temp, &(stream->queued_list), list) { if (msm_vb2_entry == msm_vb2) { list_del_init(&msm_vb2_entry->list); break; } } spin_unlock_irqrestore(&stream->stream_lock, flags); return 0; } static void msm_vb2_buf_cleanup(struct vb2_buffer *vb) { struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; unsigned long flags; msm_vb2 = container_of(vb, struct msm_vb2_buffer, vb2_buf); if (!msm_vb2) { pr_err("%s:%d] vb2 NULL", __func__, __LINE__); return; } stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s:%d] NULL stream", __func__, __LINE__); return; } spin_lock_irqsave(&stream->stream_lock, flags); INIT_LIST_HEAD(&stream->queued_list); spin_unlock_irqrestore(&stream->stream_lock, flags); } static struct vb2_ops msm_vb2_get_q_op = { .queue_setup = msm_vb2_queue_setup, .buf_init = msm_vb2_buf_init, .buf_queue = msm_vb2_buf_queue, .buf_cleanup = msm_vb2_buf_cleanup, .buf_finish = msm_vb2_buf_finish, }; struct vb2_ops *msm_vb2_get_q_ops(void) { return &msm_vb2_get_q_op; } static void *msm_vb2_dma_contig_get_userptr(void *alloc_ctx, unsigned long vaddr, unsigned long size, int write) { struct msm_vb2_private_data *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return ERR_PTR(-ENOMEM); priv->vaddr = (void *)vaddr; priv->size = size; priv->alloc_ctx = alloc_ctx; return priv; } static void msm_vb2_dma_contig_put_userptr(void *buf_priv) { kzfree(buf_priv); } static struct vb2_mem_ops 
msm_vb2_get_q_mem_op = { .get_userptr = msm_vb2_dma_contig_get_userptr, .put_userptr = msm_vb2_dma_contig_put_userptr, }; struct vb2_mem_ops *msm_vb2_get_q_mem_ops(void) { return &msm_vb2_get_q_mem_op; } static struct vb2_queue *msm_vb2_get_queue(int session_id, unsigned int stream_id) { return msm_get_stream_vb2q(session_id, stream_id); } static struct vb2_buffer *msm_vb2_get_buf(int session_id, unsigned int stream_id) { struct msm_stream *stream; struct vb2_buffer *vb2_buf = NULL; struct msm_vb2_buffer *msm_vb2 = NULL; unsigned long flags; stream = msm_get_stream(session_id, stream_id); if (IS_ERR_OR_NULL(stream)) return NULL; spin_lock_irqsave(&stream->stream_lock, flags); if (!stream->vb2_q) { pr_err("%s: stream q not available\n", __func__); goto end; } list_for_each_entry(msm_vb2, &(stream->queued_list), list) { vb2_buf = &(msm_vb2->vb2_buf); if (vb2_buf->state != VB2_BUF_STATE_ACTIVE) continue; if (msm_vb2->in_freeq) continue; msm_vb2->in_freeq = 1; goto end; } msm_vb2 = NULL; vb2_buf = NULL; end: spin_unlock_irqrestore(&stream->stream_lock, flags); return vb2_buf; } static int msm_vb2_put_buf(struct vb2_buffer *vb, int session_id, unsigned int stream_id) { struct msm_stream *stream; struct msm_vb2_buffer *msm_vb2; int rc = 0; unsigned long flags; stream = msm_get_stream(session_id, stream_id); if (IS_ERR_OR_NULL(stream)) return -EINVAL; spin_lock_irqsave(&stream->stream_lock, flags); if (vb) { msm_vb2 = container_of(vb, struct msm_vb2_buffer, vb2_buf); if (msm_vb2->in_freeq) { msm_vb2->in_freeq = 0; rc = 0; } else rc = -EINVAL; } else { pr_err("%s: VB buffer is null\n", __func__); rc = -EINVAL; } spin_unlock_irqrestore(&stream->stream_lock, flags); return rc; } static int msm_vb2_buf_done(struct vb2_buffer *vb, int session_id, unsigned int stream_id) { unsigned long flags; struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; int rc = 0; stream = msm_get_stream(session_id, stream_id); if (IS_ERR_OR_NULL(stream)) return 0; 
spin_lock_irqsave(&stream->stream_lock, flags); if (vb) { msm_vb2 = container_of(vb, struct msm_vb2_buffer, vb2_buf); /* put buf before buf done */ if (msm_vb2->in_freeq) { vb2_buffer_done(vb, VB2_BUF_STATE_DONE); msm_vb2->in_freeq = 0; rc = 0; } else rc = -EINVAL; } else { pr_err("%s: VB buffer is null\n", __func__); rc = -EINVAL; } spin_unlock_irqrestore(&stream->stream_lock, flags); return rc; } int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req) { if (!req) { pr_err("%s: suddev is null\n", __func__); return -EINVAL; } req->get_buf = msm_vb2_get_buf; req->get_vb2_queue = msm_vb2_get_queue; req->put_buf = msm_vb2_put_buf; req->buf_done = msm_vb2_buf_done; return 0; }
gpl-2.0
RockchipOpensourceCommunity/popmetal-android-kernel-3.10
net/sunrpc/svc_xprt.c
1372
35872
/* * linux/net/sunrpc/svc_xprt.c * * Author: Tom Tucker <tom@opengridcomputing.com> */ #include <linux/sched.h> #include <linux/errno.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/xprt.h> #include <linux/module.h> #define RPCDBG_FACILITY RPCDBG_SVCXPRT static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); static int svc_deferred_recv(struct svc_rqst *rqstp); static struct cache_deferred_req *svc_defer(struct cache_req *req); static void svc_age_temp_xprts(unsigned long closure); static void svc_delete_xprt(struct svc_xprt *xprt); /* apparently the "standard" is that clients close * idle connections after 5 minutes, servers after * 6 minutes * http://www.connectathon.org/talks96/nfstcp.pdf */ static int svc_conn_age_period = 6*60; /* List of registered transport classes */ static DEFINE_SPINLOCK(svc_xprt_class_lock); static LIST_HEAD(svc_xprt_class_list); /* SMP locking strategy: * * svc_pool->sp_lock protects most of the fields of that pool. * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt. * when both need to be taken (rare), svc_serv->sv_lock is first. * BKL protects svc_serv->sv_nrthread. * svc_sock->sk_lock protects the svc_sock->sk_deferred list * and the ->sk_info_authunix cache. * * The XPT_BUSY bit in xprt->xpt_flags prevents a transport being * enqueued multiply. During normal transport processing this bit * is set by svc_xprt_enqueue and cleared by svc_xprt_received. * Providers should not manipulate this bit directly. * * Some flags can be set to certain values at any time * providing that certain rules are followed: * * XPT_CONN, XPT_DATA: * - Can be set or cleared at any time. * - After a set, svc_xprt_enqueue must be called to enqueue * the transport for processing. * - After a clear, the transport must be read/accepted. 
* If this succeeds, it must be set again. * XPT_CLOSE: * - Can set at any time. It is never cleared. * XPT_DEAD: * - Can only be set while XPT_BUSY is held which ensures * that no other thread will be using the transport or will * try to set XPT_DEAD. */ int svc_reg_xprt_class(struct svc_xprt_class *xcl) { struct svc_xprt_class *cl; int res = -EEXIST; dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name); INIT_LIST_HEAD(&xcl->xcl_list); spin_lock(&svc_xprt_class_lock); /* Make sure there isn't already a class with the same name */ list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) { if (strcmp(xcl->xcl_name, cl->xcl_name) == 0) goto out; } list_add_tail(&xcl->xcl_list, &svc_xprt_class_list); res = 0; out: spin_unlock(&svc_xprt_class_lock); return res; } EXPORT_SYMBOL_GPL(svc_reg_xprt_class); void svc_unreg_xprt_class(struct svc_xprt_class *xcl) { dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name); spin_lock(&svc_xprt_class_lock); list_del_init(&xcl->xcl_list); spin_unlock(&svc_xprt_class_lock); } EXPORT_SYMBOL_GPL(svc_unreg_xprt_class); /* * Format the transport list for printing */ int svc_print_xprts(char *buf, int maxlen) { struct svc_xprt_class *xcl; char tmpstr[80]; int len = 0; buf[0] = '\0'; spin_lock(&svc_xprt_class_lock); list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { int slen; sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); slen = strlen(tmpstr); if (len + slen > maxlen) break; len += slen; strcat(buf, tmpstr); } spin_unlock(&svc_xprt_class_lock); return len; } static void svc_xprt_free(struct kref *kref) { struct svc_xprt *xprt = container_of(kref, struct svc_xprt, xpt_ref); struct module *owner = xprt->xpt_class->xcl_owner; if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) svcauth_unix_info_release(xprt); put_net(xprt->xpt_net); /* See comment on corresponding get in xs_setup_bc_tcp(): */ if (xprt->xpt_bc_xprt) xprt_put(xprt->xpt_bc_xprt); xprt->xpt_ops->xpo_free(xprt); module_put(owner); } void 
svc_xprt_put(struct svc_xprt *xprt) { kref_put(&xprt->xpt_ref, svc_xprt_free); } EXPORT_SYMBOL_GPL(svc_xprt_put); /* * Called by transport drivers to initialize the transport independent * portion of the transport instance. */ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl, struct svc_xprt *xprt, struct svc_serv *serv) { memset(xprt, 0, sizeof(*xprt)); xprt->xpt_class = xcl; xprt->xpt_ops = xcl->xcl_ops; kref_init(&xprt->xpt_ref); xprt->xpt_server = serv; INIT_LIST_HEAD(&xprt->xpt_list); INIT_LIST_HEAD(&xprt->xpt_ready); INIT_LIST_HEAD(&xprt->xpt_deferred); INIT_LIST_HEAD(&xprt->xpt_users); mutex_init(&xprt->xpt_mutex); spin_lock_init(&xprt->xpt_lock); set_bit(XPT_BUSY, &xprt->xpt_flags); rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); xprt->xpt_net = get_net(net); } EXPORT_SYMBOL_GPL(svc_xprt_init); static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, struct svc_serv *serv, struct net *net, const int family, const unsigned short port, int flags) { struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = htons(port), }; #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, .sin6_port = htons(port), }; #endif struct sockaddr *sap; size_t len; switch (family) { case PF_INET: sap = (struct sockaddr *)&sin; len = sizeof(sin); break; #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: sap = (struct sockaddr *)&sin6; len = sizeof(sin6); break; #endif default: return ERR_PTR(-EAFNOSUPPORT); } return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags); } /* * svc_xprt_received conditionally queues the transport for processing * by another thread. The caller must hold the XPT_BUSY bit and must * not thereafter touch transport data. * * Note: XPT_DATA only gets cleared when a read-attempt finds no (or * insufficient) data. 
*/ static void svc_xprt_received(struct svc_xprt *xprt) { WARN_ON_ONCE(!test_bit(XPT_BUSY, &xprt->xpt_flags)); if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) return; /* As soon as we clear busy, the xprt could be closed and * 'put', so we need a reference to call svc_xprt_enqueue with: */ svc_xprt_get(xprt); clear_bit(XPT_BUSY, &xprt->xpt_flags); svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new) { clear_bit(XPT_TEMP, &new->xpt_flags); spin_lock_bh(&serv->sv_lock); list_add(&new->xpt_list, &serv->sv_permsocks); spin_unlock_bh(&serv->sv_lock); svc_xprt_received(new); } int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, struct net *net, const int family, const unsigned short port, int flags) { struct svc_xprt_class *xcl; dprintk("svc: creating transport %s[%d]\n", xprt_name, port); spin_lock(&svc_xprt_class_lock); list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { struct svc_xprt *newxprt; unsigned short newport; if (strcmp(xprt_name, xcl->xcl_name)) continue; if (!try_module_get(xcl->xcl_owner)) goto err; spin_unlock(&svc_xprt_class_lock); newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags); if (IS_ERR(newxprt)) { module_put(xcl->xcl_owner); return PTR_ERR(newxprt); } svc_add_new_perm_xprt(serv, newxprt); newport = svc_xprt_local_port(newxprt); return newport; } err: spin_unlock(&svc_xprt_class_lock); dprintk("svc: transport %s not found\n", xprt_name); /* This errno is exposed to user space. Provide a reasonable * perror msg for a bad transport. */ return -EPROTONOSUPPORT; } EXPORT_SYMBOL_GPL(svc_create_xprt); /* * Copy the local and remote xprt addresses to the rqstp structure */ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt) { memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen); rqstp->rq_addrlen = xprt->xpt_remotelen; /* * Destination address in request is needed for binding the * source address in RPC replies/callbacks later. 
*/ memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen); rqstp->rq_daddrlen = xprt->xpt_locallen; } EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs); /** * svc_print_addr - Format rq_addr field for printing * @rqstp: svc_rqst struct containing address to print * @buf: target buffer for formatted address * @len: length of target buffer * */ char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len) { return __svc_print_addr(svc_addr(rqstp), buf, len); } EXPORT_SYMBOL_GPL(svc_print_addr); /* * Queue up an idle server thread. Must have pool->sp_lock held. * Note: this is really a stack rather than a queue, so that we only * use as many different threads as we need, and the rest don't pollute * the cache. */ static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp) { list_add(&rqstp->rq_list, &pool->sp_threads); } /* * Dequeue an nfsd thread. Must have pool->sp_lock held. */ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) { list_del(&rqstp->rq_list); } static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) { if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE))) return true; if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) return xprt->xpt_ops->xpo_has_wspace(xprt); return false; } /* * Queue up a transport with data pending. If there are idle nfsd * processes, wake 'em up. * */ void svc_xprt_enqueue(struct svc_xprt *xprt) { struct svc_pool *pool; struct svc_rqst *rqstp; int cpu; if (!svc_xprt_has_something_to_do(xprt)) return; cpu = get_cpu(); pool = svc_pool_for_cpu(xprt->xpt_server, cpu); put_cpu(); spin_lock_bh(&pool->sp_lock); if (!list_empty(&pool->sp_threads) && !list_empty(&pool->sp_sockets)) printk(KERN_ERR "svc_xprt_enqueue: " "threads and transports both waiting??\n"); pool->sp_stats.packets++; /* Mark transport as busy. It will remain in this state until * the provider calls svc_xprt_received. 
We update XPT_BUSY * atomically because it also guards against trying to enqueue * the transport twice. */ if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) { /* Don't enqueue transport while already enqueued */ dprintk("svc: transport %p busy, not enqueued\n", xprt); goto out_unlock; } if (!list_empty(&pool->sp_threads)) { rqstp = list_entry(pool->sp_threads.next, struct svc_rqst, rq_list); dprintk("svc: transport %p served by daemon %p\n", xprt, rqstp); svc_thread_dequeue(pool, rqstp); if (rqstp->rq_xprt) printk(KERN_ERR "svc_xprt_enqueue: server %p, rq_xprt=%p!\n", rqstp, rqstp->rq_xprt); rqstp->rq_xprt = xprt; svc_xprt_get(xprt); pool->sp_stats.threads_woken++; wake_up(&rqstp->rq_wait); } else { dprintk("svc: transport %p put into queue\n", xprt); list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); pool->sp_stats.sockets_queued++; } out_unlock: spin_unlock_bh(&pool->sp_lock); } EXPORT_SYMBOL_GPL(svc_xprt_enqueue); /* * Dequeue the first transport. Must be called with the pool->sp_lock held. */ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) { struct svc_xprt *xprt; if (list_empty(&pool->sp_sockets)) return NULL; xprt = list_entry(pool->sp_sockets.next, struct svc_xprt, xpt_ready); list_del_init(&xprt->xpt_ready); dprintk("svc: transport %p dequeued, inuse=%d\n", xprt, atomic_read(&xprt->xpt_ref.refcount)); return xprt; } /** * svc_reserve - change the space reserved for the reply to a request. * @rqstp: The request in question * @space: new max space to reserve * * Each request reserves some space on the output queue of the transport * to make sure the reply fits. This function reduces that reserved * space to be the amount of space used already, plus @space. 
* */ void svc_reserve(struct svc_rqst *rqstp, int space) { space += rqstp->rq_res.head[0].iov_len; if (space < rqstp->rq_reserved) { struct svc_xprt *xprt = rqstp->rq_xprt; atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); rqstp->rq_reserved = space; svc_xprt_enqueue(xprt); } } EXPORT_SYMBOL_GPL(svc_reserve); static void svc_xprt_release(struct svc_rqst *rqstp) { struct svc_xprt *xprt = rqstp->rq_xprt; rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); kfree(rqstp->rq_deferred); rqstp->rq_deferred = NULL; svc_free_res_pages(rqstp); rqstp->rq_res.page_len = 0; rqstp->rq_res.page_base = 0; /* Reset response buffer and release * the reservation. * But first, check that enough space was reserved * for the reply, otherwise we have a bug! */ if ((rqstp->rq_res.len) > rqstp->rq_reserved) printk(KERN_ERR "RPC request reserved %d but used %d\n", rqstp->rq_reserved, rqstp->rq_res.len); rqstp->rq_res.head[0].iov_len = 0; svc_reserve(rqstp, 0); rqstp->rq_xprt = NULL; svc_xprt_put(xprt); } /* * External function to wake up a server waiting for data * This really only makes sense for services like lockd * which have exactly one thread anyway. 
*/ void svc_wake_up(struct svc_serv *serv) { struct svc_rqst *rqstp; unsigned int i; struct svc_pool *pool; for (i = 0; i < serv->sv_nrpools; i++) { pool = &serv->sv_pools[i]; spin_lock_bh(&pool->sp_lock); if (!list_empty(&pool->sp_threads)) { rqstp = list_entry(pool->sp_threads.next, struct svc_rqst, rq_list); dprintk("svc: daemon %p woken up.\n", rqstp); /* svc_thread_dequeue(pool, rqstp); rqstp->rq_xprt = NULL; */ wake_up(&rqstp->rq_wait); } else pool->sp_task_pending = 1; spin_unlock_bh(&pool->sp_lock); } } EXPORT_SYMBOL_GPL(svc_wake_up); int svc_port_is_privileged(struct sockaddr *sin) { switch (sin->sa_family) { case AF_INET: return ntohs(((struct sockaddr_in *)sin)->sin_port) < PROT_SOCK; case AF_INET6: return ntohs(((struct sockaddr_in6 *)sin)->sin6_port) < PROT_SOCK; default: return 0; } } /* * Make sure that we don't have too many active connections. If we have, * something must be dropped. It's not clear what will happen if we allow * "too many" connections, but when dealing with network-facing software, * we have to code defensively. Here we do that by imposing hard limits. * * There's no point in trying to do random drop here for DoS * prevention. The NFS clients does 1 reconnect in 15 seconds. An * attacker can easily beat that. * * The only somewhat efficient mechanism would be if drop old * connections from the same IP first. But right now we don't even * record the client IP in svc_sock. * * single-threaded services that expect a lot of clients will probably * need to set sv_maxconn to override the default value which is based * on the number of threads */ static void svc_check_conn_limits(struct svc_serv *serv) { unsigned int limit = serv->sv_maxconn ? 
serv->sv_maxconn : (serv->sv_nrthreads+3) * 20; if (serv->sv_tmpcnt > limit) { struct svc_xprt *xprt = NULL; spin_lock_bh(&serv->sv_lock); if (!list_empty(&serv->sv_tempsocks)) { /* Try to help the admin */ net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n", serv->sv_name, serv->sv_maxconn ? "max number of connections" : "number of threads"); /* * Always select the oldest connection. It's not fair, * but so is life */ xprt = list_entry(serv->sv_tempsocks.prev, struct svc_xprt, xpt_list); set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_get(xprt); } spin_unlock_bh(&serv->sv_lock); if (xprt) { svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } } } int svc_alloc_arg(struct svc_rqst *rqstp) { struct svc_serv *serv = rqstp->rq_server; struct xdr_buf *arg; int pages; int i; /* now allocate needed pages. If we get a failure, sleep briefly */ pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES); if (pages >= RPCSVC_MAXPAGES) /* use as many pages as possible */ pages = RPCSVC_MAXPAGES - 1; for (i = 0; i < pages ; i++) while (rqstp->rq_pages[i] == NULL) { struct page *p = alloc_page(GFP_KERNEL); if (!p) { set_current_state(TASK_INTERRUPTIBLE); if (signalled() || kthread_should_stop()) { set_current_state(TASK_RUNNING); return -EINTR; } schedule_timeout(msecs_to_jiffies(500)); } rqstp->rq_pages[i] = p; } rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */ /* Make arg->head point to first page and arg->pages point to rest */ arg = &rqstp->rq_arg; arg->head[0].iov_base = page_address(rqstp->rq_pages[0]); arg->head[0].iov_len = PAGE_SIZE; arg->pages = rqstp->rq_pages + 1; arg->page_base = 0; /* save at least one page for response */ arg->page_len = (pages-2)*PAGE_SIZE; arg->len = (pages-1)*PAGE_SIZE; arg->tail[0].iov_len = 0; return 0; } struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) { struct svc_xprt *xprt; struct svc_pool *pool = rqstp->rq_pool; 
DECLARE_WAITQUEUE(wait, current); long time_left; /* Normally we will wait up to 5 seconds for any required * cache information to be provided. */ rqstp->rq_chandle.thread_wait = 5*HZ; spin_lock_bh(&pool->sp_lock); xprt = svc_xprt_dequeue(pool); if (xprt) { rqstp->rq_xprt = xprt; svc_xprt_get(xprt); /* As there is a shortage of threads and this request * had to be queued, don't allow the thread to wait so * long for cache updates. */ rqstp->rq_chandle.thread_wait = 1*HZ; pool->sp_task_pending = 0; } else { if (pool->sp_task_pending) { pool->sp_task_pending = 0; spin_unlock_bh(&pool->sp_lock); return ERR_PTR(-EAGAIN); } /* No data pending. Go to sleep */ svc_thread_enqueue(pool, rqstp); /* * We have to be able to interrupt this wait * to bring down the daemons ... */ set_current_state(TASK_INTERRUPTIBLE); /* * checking kthread_should_stop() here allows us to avoid * locking and signalling when stopping kthreads that call * svc_recv. If the thread has already been woken up, then * we can exit here without sleeping. 
If not, then it * it'll be woken up quickly during the schedule_timeout */ if (kthread_should_stop()) { set_current_state(TASK_RUNNING); spin_unlock_bh(&pool->sp_lock); return ERR_PTR(-EINTR); } add_wait_queue(&rqstp->rq_wait, &wait); spin_unlock_bh(&pool->sp_lock); time_left = schedule_timeout(timeout); try_to_freeze(); spin_lock_bh(&pool->sp_lock); remove_wait_queue(&rqstp->rq_wait, &wait); if (!time_left) pool->sp_stats.threads_timedout++; xprt = rqstp->rq_xprt; if (!xprt) { svc_thread_dequeue(pool, rqstp); spin_unlock_bh(&pool->sp_lock); dprintk("svc: server %p, no data yet\n", rqstp); if (signalled() || kthread_should_stop()) return ERR_PTR(-EINTR); else return ERR_PTR(-EAGAIN); } } spin_unlock_bh(&pool->sp_lock); return xprt; } void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt) { spin_lock_bh(&serv->sv_lock); set_bit(XPT_TEMP, &newxpt->xpt_flags); list_add(&newxpt->xpt_list, &serv->sv_tempsocks); serv->sv_tmpcnt++; if (serv->sv_temptimer.function == NULL) { /* setup timer to age temp transports */ setup_timer(&serv->sv_temptimer, svc_age_temp_xprts, (unsigned long)serv); mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); } spin_unlock_bh(&serv->sv_lock); svc_xprt_received(newxpt); } static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) { struct svc_serv *serv = rqstp->rq_server; int len = 0; if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { dprintk("svc_recv: found XPT_CLOSE\n"); svc_delete_xprt(xprt); /* Leave XPT_BUSY set on the dead xprt: */ return 0; } if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { struct svc_xprt *newxpt; /* * We know this module_get will succeed because the * listener holds a reference too */ __module_get(xprt->xpt_class->xcl_owner); svc_check_conn_limits(xprt->xpt_server); newxpt = xprt->xpt_ops->xpo_accept(xprt); if (newxpt) svc_add_new_temp_xprt(serv, newxpt); else module_put(xprt->xpt_class->xcl_owner); } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { /* XPT_DATA|XPT_DEFERRED 
case: */ dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", rqstp, rqstp->rq_pool->sp_id, xprt, atomic_read(&xprt->xpt_ref.refcount)); rqstp->rq_deferred = svc_deferred_dequeue(xprt); if (rqstp->rq_deferred) len = svc_deferred_recv(rqstp); else len = xprt->xpt_ops->xpo_recvfrom(rqstp); dprintk("svc: got len=%d\n", len); rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); } /* clear XPT_BUSY: */ svc_xprt_received(xprt); return len; } /* * Receive the next request on any transport. This code is carefully * organised not to touch any cachelines in the shared svc_serv * structure, only cachelines in the local svc_pool. */ int svc_recv(struct svc_rqst *rqstp, long timeout) { struct svc_xprt *xprt = NULL; struct svc_serv *serv = rqstp->rq_server; int len, err; dprintk("svc: server %p waiting for data (to = %ld)\n", rqstp, timeout); if (rqstp->rq_xprt) printk(KERN_ERR "svc_recv: service %p, transport not NULL!\n", rqstp); if (waitqueue_active(&rqstp->rq_wait)) printk(KERN_ERR "svc_recv: service %p, wait queue active!\n", rqstp); err = svc_alloc_arg(rqstp); if (err) return err; try_to_freeze(); cond_resched(); if (signalled() || kthread_should_stop()) return -EINTR; xprt = svc_get_next_xprt(rqstp, timeout); if (IS_ERR(xprt)) return PTR_ERR(xprt); len = svc_handle_xprt(rqstp, xprt); /* No data, incomplete (TCP) read, or accept() */ if (len <= 0) goto out; clear_bit(XPT_OLD, &xprt->xpt_flags); rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); rqstp->rq_chandle.defer = svc_defer; if (serv->sv_stats) serv->sv_stats->netcnt++; return len; out: rqstp->rq_res.len = 0; svc_xprt_release(rqstp); return -EAGAIN; } EXPORT_SYMBOL_GPL(svc_recv); /* * Drop request */ void svc_drop(struct svc_rqst *rqstp) { dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); svc_xprt_release(rqstp); } EXPORT_SYMBOL_GPL(svc_drop); /* * Return reply to client. 
*/ int svc_send(struct svc_rqst *rqstp) { struct svc_xprt *xprt; int len; struct xdr_buf *xb; xprt = rqstp->rq_xprt; if (!xprt) return -EFAULT; /* release the receive skb before sending the reply */ rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); /* calculate over-all length */ xb = &rqstp->rq_res; xb->len = xb->head[0].iov_len + xb->page_len + xb->tail[0].iov_len; /* Grab mutex to serialize outgoing data. */ mutex_lock(&xprt->xpt_mutex); if (test_bit(XPT_DEAD, &xprt->xpt_flags) || test_bit(XPT_CLOSE, &xprt->xpt_flags)) len = -ENOTCONN; else len = xprt->xpt_ops->xpo_sendto(rqstp); mutex_unlock(&xprt->xpt_mutex); rpc_wake_up(&xprt->xpt_bc_pending); svc_xprt_release(rqstp); if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) return 0; return len; } /* * Timer function to close old temporary transports, using * a mark-and-sweep algorithm. */ static void svc_age_temp_xprts(unsigned long closure) { struct svc_serv *serv = (struct svc_serv *)closure; struct svc_xprt *xprt; struct list_head *le, *next; dprintk("svc_age_temp_xprts\n"); if (!spin_trylock_bh(&serv->sv_lock)) { /* busy, try again 1 sec later */ dprintk("svc_age_temp_xprts: busy\n"); mod_timer(&serv->sv_temptimer, jiffies + HZ); return; } list_for_each_safe(le, next, &serv->sv_tempsocks) { xprt = list_entry(le, struct svc_xprt, xpt_list); /* First time through, just mark it OLD. Second time * through, close it. 
*/ if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags)) continue; if (atomic_read(&xprt->xpt_ref.refcount) > 1 || test_bit(XPT_BUSY, &xprt->xpt_flags)) continue; list_del_init(le); set_bit(XPT_CLOSE, &xprt->xpt_flags); set_bit(XPT_DETACHED, &xprt->xpt_flags); dprintk("queuing xprt %p for closing\n", xprt); /* a thread will dequeue and close it soon */ svc_xprt_enqueue(xprt); } spin_unlock_bh(&serv->sv_lock); mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); } static void call_xpt_users(struct svc_xprt *xprt) { struct svc_xpt_user *u; spin_lock(&xprt->xpt_lock); while (!list_empty(&xprt->xpt_users)) { u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list); list_del(&u->list); u->callback(u); } spin_unlock(&xprt->xpt_lock); } /* * Remove a dead transport */ static void svc_delete_xprt(struct svc_xprt *xprt) { struct svc_serv *serv = xprt->xpt_server; struct svc_deferred_req *dr; /* Only do this once */ if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) BUG(); dprintk("svc: svc_delete_xprt(%p)\n", xprt); xprt->xpt_ops->xpo_detach(xprt); spin_lock_bh(&serv->sv_lock); if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) list_del_init(&xprt->xpt_list); WARN_ON_ONCE(!list_empty(&xprt->xpt_ready)); if (test_bit(XPT_TEMP, &xprt->xpt_flags)) serv->sv_tmpcnt--; spin_unlock_bh(&serv->sv_lock); while ((dr = svc_deferred_dequeue(xprt)) != NULL) kfree(dr); call_xpt_users(xprt); svc_xprt_put(xprt); } void svc_close_xprt(struct svc_xprt *xprt) { set_bit(XPT_CLOSE, &xprt->xpt_flags); if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) /* someone else will have to effect the close */ return; /* * We expect svc_close_xprt() to work even when no threads are * running (e.g., while configuring the server before starting * any threads), so if the transport isn't busy, we delete * it ourself: */ svc_delete_xprt(xprt); } EXPORT_SYMBOL_GPL(svc_close_xprt); static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net) { struct svc_xprt 
*xprt; int ret = 0; spin_lock(&serv->sv_lock); list_for_each_entry(xprt, xprt_list, xpt_list) { if (xprt->xpt_net != net) continue; ret++; set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_enqueue(xprt); } spin_unlock(&serv->sv_lock); return ret; } static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net) { struct svc_pool *pool; struct svc_xprt *xprt; struct svc_xprt *tmp; int i; for (i = 0; i < serv->sv_nrpools; i++) { pool = &serv->sv_pools[i]; spin_lock_bh(&pool->sp_lock); list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) { if (xprt->xpt_net != net) continue; list_del_init(&xprt->xpt_ready); spin_unlock_bh(&pool->sp_lock); return xprt; } spin_unlock_bh(&pool->sp_lock); } return NULL; } static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net) { struct svc_xprt *xprt; while ((xprt = svc_dequeue_net(serv, net))) { set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_delete_xprt(xprt); } } /* * Server threads may still be running (especially in the case where the * service is still running in other network namespaces). * * So we shut down sockets the same way we would on a running server, by * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do * the close. In the case there are no such other threads, * threads running, svc_clean_up_xprts() does a simple version of a * server's main event loop, and in the case where there are other * threads, we may need to wait a little while and then check again to * see if they're done. 
*/ void svc_close_net(struct svc_serv *serv, struct net *net) { int delay = 0; while (svc_close_list(serv, &serv->sv_permsocks, net) + svc_close_list(serv, &serv->sv_tempsocks, net)) { svc_clean_up_xprts(serv, net); msleep(delay++); } } /* * Handle defer and revisit of requests */ static void svc_revisit(struct cache_deferred_req *dreq, int too_many) { struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle); struct svc_xprt *xprt = dr->xprt; spin_lock(&xprt->xpt_lock); set_bit(XPT_DEFERRED, &xprt->xpt_flags); if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) { spin_unlock(&xprt->xpt_lock); dprintk("revisit canceled\n"); svc_xprt_put(xprt); kfree(dr); return; } dprintk("revisit queued\n"); dr->xprt = NULL; list_add(&dr->handle.recent, &xprt->xpt_deferred); spin_unlock(&xprt->xpt_lock); svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } /* * Save the request off for later processing. The request buffer looks * like this: * * <xprt-header><rpc-header><rpc-pagelist><rpc-tail> * * This code can only handle requests that consist of an xprt-header * and rpc-header. 
*/ static struct cache_deferred_req *svc_defer(struct cache_req *req) { struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle); struct svc_deferred_req *dr; if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral) return NULL; /* if more than a page, give up FIXME */ if (rqstp->rq_deferred) { dr = rqstp->rq_deferred; rqstp->rq_deferred = NULL; } else { size_t skip; size_t size; /* FIXME maybe discard if size too large */ size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len; dr = kmalloc(size, GFP_KERNEL); if (dr == NULL) return NULL; dr->handle.owner = rqstp->rq_server; dr->prot = rqstp->rq_prot; memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen); dr->addrlen = rqstp->rq_addrlen; dr->daddr = rqstp->rq_daddr; dr->argslen = rqstp->rq_arg.len >> 2; dr->xprt_hlen = rqstp->rq_xprt_hlen; /* back up head to the start of the buffer and copy */ skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip, dr->argslen << 2); } svc_xprt_get(rqstp->rq_xprt); dr->xprt = rqstp->rq_xprt; rqstp->rq_dropme = true; dr->handle.revisit = svc_revisit; return &dr->handle; } /* * recv data from a deferred request into an active one */ static int svc_deferred_recv(struct svc_rqst *rqstp) { struct svc_deferred_req *dr = rqstp->rq_deferred; /* setup iov_base past transport header */ rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2); /* The iov_len does not include the transport header bytes */ rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen; rqstp->rq_arg.page_len = 0; /* The rq_arg.len includes the transport header bytes */ rqstp->rq_arg.len = dr->argslen<<2; rqstp->rq_prot = dr->prot; memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen); rqstp->rq_addrlen = dr->addrlen; /* Save off transport header len in case we get deferred again */ rqstp->rq_xprt_hlen = dr->xprt_hlen; rqstp->rq_daddr = dr->daddr; rqstp->rq_respages = rqstp->rq_pages; return (dr->argslen<<2) - dr->xprt_hlen; } static 
struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) { struct svc_deferred_req *dr = NULL; if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags)) return NULL; spin_lock(&xprt->xpt_lock); if (!list_empty(&xprt->xpt_deferred)) { dr = list_entry(xprt->xpt_deferred.next, struct svc_deferred_req, handle.recent); list_del_init(&dr->handle.recent); } else clear_bit(XPT_DEFERRED, &xprt->xpt_flags); spin_unlock(&xprt->xpt_lock); return dr; } /** * svc_find_xprt - find an RPC transport instance * @serv: pointer to svc_serv to search * @xcl_name: C string containing transport's class name * @net: owner net pointer * @af: Address family of transport's local address * @port: transport's IP port number * * Return the transport instance pointer for the endpoint accepting * connections/peer traffic from the specified transport class, * address family and port. * * Specifying 0 for the address family or port is effectively a * wild-card, and will result in matching the first transport in the * service's list that has a matching class name. 
*/ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, struct net *net, const sa_family_t af, const unsigned short port) { struct svc_xprt *xprt; struct svc_xprt *found = NULL; /* Sanity check the args */ if (serv == NULL || xcl_name == NULL) return found; spin_lock_bh(&serv->sv_lock); list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) { if (xprt->xpt_net != net) continue; if (strcmp(xprt->xpt_class->xcl_name, xcl_name)) continue; if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family) continue; if (port != 0 && port != svc_xprt_local_port(xprt)) continue; found = xprt; svc_xprt_get(xprt); break; } spin_unlock_bh(&serv->sv_lock); return found; } EXPORT_SYMBOL_GPL(svc_find_xprt); static int svc_one_xprt_name(const struct svc_xprt *xprt, char *pos, int remaining) { int len; len = snprintf(pos, remaining, "%s %u\n", xprt->xpt_class->xcl_name, svc_xprt_local_port(xprt)); if (len >= remaining) return -ENAMETOOLONG; return len; } /** * svc_xprt_names - format a buffer with a list of transport names * @serv: pointer to an RPC service * @buf: pointer to a buffer to be filled in * @buflen: length of buffer to be filled in * * Fills in @buf with a string containing a list of transport names, * each name terminated with '\n'. * * Returns positive length of the filled-in string on success; otherwise * a negative errno value is returned if an error occurs. 
*/ int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen) { struct svc_xprt *xprt; int len, totlen; char *pos; /* Sanity check args */ if (!serv) return 0; spin_lock_bh(&serv->sv_lock); pos = buf; totlen = 0; list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) { len = svc_one_xprt_name(xprt, pos, buflen - totlen); if (len < 0) { *buf = '\0'; totlen = len; } if (len <= 0) break; pos += len; totlen += len; } spin_unlock_bh(&serv->sv_lock); return totlen; } EXPORT_SYMBOL_GPL(svc_xprt_names); /*----------------------------------------------------------------------------*/ static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos) { unsigned int pidx = (unsigned int)*pos; struct svc_serv *serv = m->private; dprintk("svc_pool_stats_start, *pidx=%u\n", pidx); if (!pidx) return SEQ_START_TOKEN; return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]); } static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos) { struct svc_pool *pool = p; struct svc_serv *serv = m->private; dprintk("svc_pool_stats_next, *pos=%llu\n", *pos); if (p == SEQ_START_TOKEN) { pool = &serv->sv_pools[0]; } else { unsigned int pidx = (pool - &serv->sv_pools[0]); if (pidx < serv->sv_nrpools-1) pool = &serv->sv_pools[pidx+1]; else pool = NULL; } ++*pos; return pool; } static void svc_pool_stats_stop(struct seq_file *m, void *p) { } static int svc_pool_stats_show(struct seq_file *m, void *p) { struct svc_pool *pool = p; if (p == SEQ_START_TOKEN) { seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n"); return 0; } seq_printf(m, "%u %lu %lu %lu %lu\n", pool->sp_id, pool->sp_stats.packets, pool->sp_stats.sockets_queued, pool->sp_stats.threads_woken, pool->sp_stats.threads_timedout); return 0; } static const struct seq_operations svc_pool_stats_seq_ops = { .start = svc_pool_stats_start, .next = svc_pool_stats_next, .stop = svc_pool_stats_stop, .show = svc_pool_stats_show, }; int svc_pool_stats_open(struct svc_serv 
*serv, struct file *file) { int err; err = seq_open(file, &svc_pool_stats_seq_ops); if (!err) ((struct seq_file *) file->private_data)->private = serv; return err; } EXPORT_SYMBOL(svc_pool_stats_open); /*----------------------------------------------------------------------------*/
gpl-2.0
Alberto97/android_kernel_lge_dory
drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
2140
6821
/*===================================================== * CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved. * * * This file is part of Express Card USB Driver * * $Id: *==================================================== */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/firmware.h> #include "ft1000_usb.h" #include <linux/kthread.h> MODULE_DESCRIPTION("FT1000 EXPRESS CARD DRIVER"); MODULE_LICENSE("Dual MPL/GPL"); MODULE_SUPPORTED_DEVICE("QFT FT1000 Express Cards"); void *pFileStart; size_t FileLength; #define VENDOR_ID 0x1291 /* Qualcomm vendor id */ #define PRODUCT_ID 0x11 /* fake product id */ /* table of devices that work with this driver */ static struct usb_device_id id_table[] = { {USB_DEVICE(VENDOR_ID, PRODUCT_ID)}, {}, }; MODULE_DEVICE_TABLE(usb, id_table); static bool gPollingfailed = FALSE; static int ft1000_poll_thread(void *arg) { int ret; while (!kthread_should_stop()) { msleep(10); if (!gPollingfailed) { ret = ft1000_poll(arg); if (ret != STATUS_SUCCESS) { DEBUG("ft1000_poll_thread: polling failed\n"); gPollingfailed = TRUE; } } } return STATUS_SUCCESS; } static int ft1000_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct usb_device *dev; unsigned numaltsetting; int i, ret = 0, size; struct ft1000_usb *ft1000dev; struct ft1000_info *pft1000info = NULL; const struct firmware *dsp_fw; ft1000dev = kzalloc(sizeof(struct ft1000_usb), GFP_KERNEL); if (!ft1000dev) return -ENOMEM; dev = interface_to_usbdev(interface); DEBUG("ft1000_probe: usb device descriptor info:\n"); DEBUG("ft1000_probe: number of configuration is %d\n", dev->descriptor.bNumConfigurations); ft1000dev->dev = dev; ft1000dev->status = 0; ft1000dev->net = NULL; ft1000dev->tx_urb = usb_alloc_urb(0, GFP_ATOMIC); ft1000dev->rx_urb = usb_alloc_urb(0, 
GFP_ATOMIC); DEBUG("ft1000_probe is called\n"); numaltsetting = interface->num_altsetting; DEBUG("ft1000_probe: number of alt settings is :%d\n", numaltsetting); iface_desc = interface->cur_altsetting; DEBUG("ft1000_probe: number of endpoints is %d\n", iface_desc->desc.bNumEndpoints); DEBUG("ft1000_probe: descriptor type is %d\n", iface_desc->desc.bDescriptorType); DEBUG("ft1000_probe: interface number is %d\n", iface_desc->desc.bInterfaceNumber); DEBUG("ft1000_probe: alternatesetting is %d\n", iface_desc->desc.bAlternateSetting); DEBUG("ft1000_probe: interface class is %d\n", iface_desc->desc.bInterfaceClass); DEBUG("ft1000_probe: control endpoint info:\n"); DEBUG("ft1000_probe: descriptor0 type -- %d\n", iface_desc->endpoint[0].desc.bmAttributes); DEBUG("ft1000_probe: descriptor1 type -- %d\n", iface_desc->endpoint[1].desc.bmAttributes); DEBUG("ft1000_probe: descriptor2 type -- %d\n", iface_desc->endpoint[2].desc.bmAttributes); for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { endpoint = (struct usb_endpoint_descriptor *)&iface_desc-> endpoint[i].desc; DEBUG("endpoint %d\n", i); DEBUG("bEndpointAddress=%x, bmAttributes=%x\n", endpoint->bEndpointAddress, endpoint->bmAttributes); if ((endpoint->bEndpointAddress & USB_DIR_IN) && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { ft1000dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; DEBUG("ft1000_probe: in: %d\n", endpoint->bEndpointAddress); } if (!(endpoint->bEndpointAddress & USB_DIR_IN) && ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { ft1000dev->bulk_out_endpointAddr = endpoint->bEndpointAddress; DEBUG("ft1000_probe: out: %d\n", endpoint->bEndpointAddress); } } DEBUG("bulk_in=%d, bulk_out=%d\n", ft1000dev->bulk_in_endpointAddr, ft1000dev->bulk_out_endpointAddr); ret = request_firmware(&dsp_fw, "ft3000.img", &dev->dev); if (ret < 0) { pr_err("Error request_firmware().\n"); goto err_fw; } size = max_t(uint, dsp_fw->size, 4096); 
pFileStart = kmalloc(size, GFP_KERNEL); if (!pFileStart) { release_firmware(dsp_fw); ret = -ENOMEM; goto err_fw; } memcpy(pFileStart, dsp_fw->data, dsp_fw->size); FileLength = dsp_fw->size; release_firmware(dsp_fw); DEBUG("ft1000_probe: start downloading dsp image...\n"); ret = init_ft1000_netdev(ft1000dev); if (ret) goto err_load; pft1000info = netdev_priv(ft1000dev->net); DEBUG("In probe: pft1000info=%p\n", pft1000info); ret = dsp_reload(ft1000dev); if (ret) { pr_err("Problem with DSP image loading\n"); goto err_load; } gPollingfailed = FALSE; ft1000dev->pPollThread = kthread_run(ft1000_poll_thread, ft1000dev, "ft1000_poll"); if (IS_ERR(ft1000dev->pPollThread)) { ret = PTR_ERR(ft1000dev->pPollThread); goto err_load; } msleep(500); while (!pft1000info->CardReady) { if (gPollingfailed) { ret = -EIO; goto err_thread; } msleep(100); DEBUG("ft1000_probe::Waiting for Card Ready\n"); } DEBUG("ft1000_probe::Card Ready!!!! Registering network device\n"); ret = reg_ft1000_netdev(ft1000dev, interface); if (ret) goto err_thread; ret = ft1000_init_proc(ft1000dev->net); if (ret) goto err_proc; ft1000dev->NetDevRegDone = 1; return 0; err_proc: unregister_netdev(ft1000dev->net); free_netdev(ft1000dev->net); err_thread: kthread_stop(ft1000dev->pPollThread); err_load: kfree(pFileStart); err_fw: kfree(ft1000dev); return ret; } static void ft1000_disconnect(struct usb_interface *interface) { struct ft1000_info *pft1000info; struct ft1000_usb *ft1000dev; DEBUG("ft1000_disconnect is called\n"); pft1000info = (struct ft1000_info *) usb_get_intfdata(interface); DEBUG("In disconnect pft1000info=%p\n", pft1000info); if (pft1000info) { ft1000dev = pft1000info->priv; ft1000_cleanup_proc(pft1000info); if (ft1000dev->pPollThread) kthread_stop(ft1000dev->pPollThread); DEBUG("ft1000_disconnect: threads are terminated\n"); if (ft1000dev->net) { DEBUG("ft1000_disconnect: destroy char driver\n"); ft1000_destroy_dev(ft1000dev->net); unregister_netdev(ft1000dev->net); DEBUG ("ft1000_disconnect: 
network device unregistered\n"); free_netdev(ft1000dev->net); } usb_free_urb(ft1000dev->rx_urb); usb_free_urb(ft1000dev->tx_urb); DEBUG("ft1000_disconnect: urb freed\n"); kfree(ft1000dev); } kfree(pFileStart); return; } static struct usb_driver ft1000_usb_driver = { .name = "ft1000usb", .probe = ft1000_probe, .disconnect = ft1000_disconnect, .id_table = id_table, }; module_usb_driver(ft1000_usb_driver);
gpl-2.0
Chibaibuki/TCP-IP-Timer-For-Linux-Kernel
drivers/video/fm2fb.c
2396
9258
/* * linux/drivers/video/fm2fb.c -- BSC FrameMaster II/Rainbow II frame buffer * device * * Copyright (C) 1998 Steffen A. Mork (linux-dev@morknet.de) * Copyright (C) 1999 Geert Uytterhoeven * * Written for 2.0.x by Steffen A. Mork * Ported to 2.1.x by Geert Uytterhoeven * Ported to new api by James Simmons * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/zorro.h> #include <asm/io.h> /* * Some technical notes: * * The BSC FrameMaster II (or Rainbow II) is a simple very dumb * frame buffer which allows to display 24 bit true color images. * Each pixel is 32 bit width so it's very easy to maintain the * frame buffer. One long word has the following layout: * AARRGGBB which means: AA the alpha channel byte, RR the red * channel, GG the green channel and BB the blue channel. * * The FrameMaster II supports the following video modes. * - PAL/NTSC * - interlaced/non interlaced * - composite sync/sync/sync over green * * The resolution is to the following both ones: * - 768x576 (PAL) * - 768x480 (NTSC) * * This means that pixel access per line is fixed due to the * fixed line width. In case of maximal resolution the frame * buffer needs an amount of memory of 1.769.472 bytes which * is near to 2 MByte (the allocated address space of Zorro2). * The memory is channel interleaved. That means every channel * owns four VRAMs. Unfortunately most FrameMasters II are * not assembled with memory for the alpha channel. In this * case it could be possible to add the frame buffer into the * normal memory pool. * * At relative address 0x1ffff8 of the frame buffers base address * there exists a control register with the number of * four control bits. 
They have the following meaning: * bit value meaning * * 0 1 0=interlaced/1=non interlaced * 1 2 0=video out disabled/1=video out enabled * 2 4 0=normal mode as jumpered via JP8/1=complement mode * 3 8 0=read onboard ROM/1 normal operation (required) * * As mentioned above there are several jumper. I think there * is not very much information about the FrameMaster II in * the world so I add these information for completeness. * * JP1 interlace selection (1-2 non interlaced/2-3 interlaced) * JP2 wait state creation (leave as is!) * JP3 wait state creation (leave as is!) * JP4 modulate composite sync on green output (1-2 composite * sync on green channel/2-3 normal composite sync) * JP5 create test signal, shorting this jumper will create * a white screen * JP6 sync creation (1-2 composite sync/2-3 H-sync output) * JP8 video mode (1-2 PAL/2-3 NTSC) * * With the following jumpering table you can connect the * FrameMaster II to a normal TV via SCART connector: * JP1: 2-3 * JP4: 2-3 * JP6: 2-3 * JP8: 1-2 (means PAL for Europe) * * NOTE: * There is no other possibility to change the video timings * except the interlaced/non interlaced, sync control and the * video mode PAL (50 Hz)/NTSC (60 Hz). Inside this * FrameMaster II driver are assumed values to avoid anomalies * to a future X server. Except the pixel clock is really * constant at 30 MHz. * * 9 pin female video connector: * * 1 analog red 0.7 Vss * 2 analog green 0.7 Vss * 3 analog blue 0.7 Vss * 4 H-sync TTL * 5 V-sync TTL * 6 ground * 7 ground * 8 ground * 9 ground * * Some performance notes: * The FrameMaster II was not designed to display a console * this driver would do! It was designed to display still true * color images. Imagine: When scroll up a text line there * must copied ca. 1.7 MBytes to another place inside this * frame buffer. This means 1.7 MByte read and 1.7 MByte write * over the slow 16 bit wide Zorro2 bus! 
A scroll of one * line needs 1 second so do not expect to much from this * driver - he is at the limit! * */ /* * definitions */ #define FRAMEMASTER_SIZE 0x200000 #define FRAMEMASTER_REG 0x1ffff8 #define FRAMEMASTER_NOLACE 1 #define FRAMEMASTER_ENABLE 2 #define FRAMEMASTER_COMPL 4 #define FRAMEMASTER_ROM 8 static volatile unsigned char *fm2fb_reg; static struct fb_fix_screeninfo fb_fix = { .smem_len = FRAMEMASTER_REG, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .line_length = (768 << 2), .mmio_len = (8), .accel = FB_ACCEL_NONE, }; static int fm2fb_mode = -1; #define FM2FB_MODE_PAL 0 #define FM2FB_MODE_NTSC 1 static struct fb_var_screeninfo fb_var_modes[] = { { /* 768 x 576, 32 bpp (PAL) */ 768, 576, 768, 576, 0, 0, 32, 0, { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 8, 0 }, 0, FB_ACTIVATE_NOW, -1, -1, FB_ACCEL_NONE, 33333, 10, 102, 10, 5, 80, 34, FB_SYNC_COMP_HIGH_ACT, 0 }, { /* 768 x 480, 32 bpp (NTSC - not supported yet */ 768, 480, 768, 480, 0, 0, 32, 0, { 16, 8, 0 }, { 8, 8, 0 }, { 0, 8, 0 }, { 24, 8, 0 }, 0, FB_ACTIVATE_NOW, -1, -1, FB_ACCEL_NONE, 33333, 10, 102, 10, 5, 80, 34, FB_SYNC_COMP_HIGH_ACT, 0 } }; /* * Interface used by the world */ static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int fm2fb_blank(int blank, struct fb_info *info); static struct fb_ops fm2fb_ops = { .owner = THIS_MODULE, .fb_setcolreg = fm2fb_setcolreg, .fb_blank = fm2fb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Blank the display. */ static int fm2fb_blank(int blank, struct fb_info *info) { unsigned char t = FRAMEMASTER_ROM; if (!blank) t |= FRAMEMASTER_ENABLE | FRAMEMASTER_NOLACE; fm2fb_reg[0] = t; return 0; } /* * Set a single color register. The values supplied are already * rounded down to the hardware's capabilities (according to the * entries in the var structure). Return != 0 for invalid regno. 
*/ static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { if (regno < 16) { red >>= 8; green >>= 8; blue >>= 8; ((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue; } return 0; } /* * Initialisation */ static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id); static struct zorro_device_id fm2fb_devices[] = { { ZORRO_PROD_BSC_FRAMEMASTER_II }, { ZORRO_PROD_HELFRICH_RAINBOW_II }, { 0 } }; MODULE_DEVICE_TABLE(zorro, fm2fb_devices); static struct zorro_driver fm2fb_driver = { .name = "fm2fb", .id_table = fm2fb_devices, .probe = fm2fb_probe, }; static int fm2fb_probe(struct zorro_dev *z, const struct zorro_device_id *id) { struct fb_info *info; unsigned long *ptr; int is_fm; int x, y; is_fm = z->id == ZORRO_PROD_BSC_FRAMEMASTER_II; if (!zorro_request_device(z,"fm2fb")) return -ENXIO; info = framebuffer_alloc(16 * sizeof(u32), &z->dev); if (!info) { zorro_release_device(z); return -ENOMEM; } if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { framebuffer_release(info); zorro_release_device(z); return -ENOMEM; } /* assigning memory to kernel space */ fb_fix.smem_start = zorro_resource_start(z); info->screen_base = ioremap(fb_fix.smem_start, FRAMEMASTER_SIZE); fb_fix.mmio_start = fb_fix.smem_start + FRAMEMASTER_REG; fm2fb_reg = (unsigned char *)(info->screen_base+FRAMEMASTER_REG); strcpy(fb_fix.id, is_fm ? 
"FrameMaster II" : "Rainbow II"); /* make EBU color bars on display */ ptr = (unsigned long *)fb_fix.smem_start; for (y = 0; y < 576; y++) { for (x = 0; x < 96; x++) *ptr++ = 0xffffff;/* white */ for (x = 0; x < 96; x++) *ptr++ = 0xffff00;/* yellow */ for (x = 0; x < 96; x++) *ptr++ = 0x00ffff;/* cyan */ for (x = 0; x < 96; x++) *ptr++ = 0x00ff00;/* green */ for (x = 0; x < 96; x++) *ptr++ = 0xff00ff;/* magenta */ for (x = 0; x < 96; x++) *ptr++ = 0xff0000;/* red */ for (x = 0; x < 96; x++) *ptr++ = 0x0000ff;/* blue */ for (x = 0; x < 96; x++) *ptr++ = 0x000000;/* black */ } fm2fb_blank(0, info); if (fm2fb_mode == -1) fm2fb_mode = FM2FB_MODE_PAL; info->fbops = &fm2fb_ops; info->var = fb_var_modes[fm2fb_mode]; info->pseudo_palette = info->par; info->par = NULL; info->fix = fb_fix; info->flags = FBINFO_DEFAULT; if (register_framebuffer(info) < 0) { fb_dealloc_cmap(&info->cmap); iounmap(info->screen_base); framebuffer_release(info); zorro_release_device(z); return -EINVAL; } printk("fb%d: %s frame buffer device\n", info->node, fb_fix.id); return 0; } int __init fm2fb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!strncmp(this_opt, "pal", 3)) fm2fb_mode = FM2FB_MODE_PAL; else if (!strncmp(this_opt, "ntsc", 4)) fm2fb_mode = FM2FB_MODE_NTSC; } return 0; } int __init fm2fb_init(void) { char *option = NULL; if (fb_get_options("fm2fb", &option)) return -ENODEV; fm2fb_setup(option); return zorro_register_driver(&fm2fb_driver); } module_init(fm2fb_init); MODULE_LICENSE("GPL");
gpl-2.0
DirtyUnicorns/android_kernel_samsung_smdk4412
arch/ia64/kernel/mca.c
2396
62270
/* * File: mca.c * Purpose: Generic MCA handling layer * * Copyright (C) 2003 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * Copyright (C) 2002 Dell Inc. * Copyright (C) Matt Domsch <Matt_Domsch@dell.com> * * Copyright (C) 2002 Intel * Copyright (C) Jenna Hall <jenna.s.hall@intel.com> * * Copyright (C) 2001 Intel * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com> * * Copyright (C) 2000 Intel * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com> * * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc. * Copyright (C) Vijay Chander <vijay@engr.sgi.com> * * Copyright (C) 2006 FUJITSU LIMITED * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> * * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com> * Fixed PAL/SAL update issues, began MCA bug fixes, logging issues, * added min save state dump, added INIT handler. * * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com> * Added setup of CMCI and CPEI IRQs, logging of corrected platform * errors, completed code for logging of corrected & uncorrected * machine check errors, and updated for conformance with Nov. 2000 * revision of the SAL 3.0 spec. * * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com> * Aligned MCA stack to 16 bytes, added platform vs. CPU error flag, * set SAL default return values, changed error record structure to * linked list, added init call to sal_get_state_info_size(). * * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com> * GUID cleanups. * * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com> * Added INIT backtrace support. * * 2003-12-08 Keith Owens <kaos@sgi.com> * smp_call_function() must not be called from interrupt context * (can deadlock on tasklist_lock). * Use keventd to call smp_call_function(). * * 2004-02-01 Keith Owens <kaos@sgi.com> * Avoid deadlock when using printk() for MCA and INIT records. * Delete all record printing code, moved to salinfo_decode in user * space. Mark variables and functions static where possible. 
* Delete dead variables and functions. Reorder to remove the need * for forward declarations and to consolidate related code. * * 2005-08-12 Keith Owens <kaos@sgi.com> * Convert MCA/INIT handlers to use per event stacks and SAL/OS * state. * * 2005-10-07 Keith Owens <kaos@sgi.com> * Add notify_die() hooks. * * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> * Add printing support for MCA/INIT. * * 2007-04-27 Russ Anderson <rja@sgi.com> * Support multiple cpus going through OS_MCA in the same event. */ #include <linux/jiffies.h> #include <linux/types.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/bootmem.h> #include <linux/acpi.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/smp.h> #include <linux/workqueue.h> #include <linux/cpumask.h> #include <linux/kdebug.h> #include <linux/cpu.h> #include <linux/gfp.h> #include <asm/delay.h> #include <asm/machvec.h> #include <asm/meminit.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/system.h> #include <asm/sal.h> #include <asm/mca.h> #include <asm/kexec.h> #include <asm/irq.h> #include <asm/hw_irq.h> #include <asm/tlb.h> #include "mca_drv.h" #include "entry.h" #if defined(IA64_MCA_DEBUG_INFO) # define IA64_MCA_DEBUG(fmt...) printk(fmt) #else # define IA64_MCA_DEBUG(fmt...) 
#endif #define NOTIFY_INIT(event, regs, arg, spin) \ do { \ if ((notify_die((event), "INIT", (regs), (arg), 0, 0) \ == NOTIFY_STOP) && ((spin) == 1)) \ ia64_mca_spin(__func__); \ } while (0) #define NOTIFY_MCA(event, regs, arg, spin) \ do { \ if ((notify_die((event), "MCA", (regs), (arg), 0, 0) \ == NOTIFY_STOP) && ((spin) == 1)) \ ia64_mca_spin(__func__); \ } while (0) /* Used by mca_asm.S */ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ DEFINE_PER_CPU(u64, ia64_mca_tr_reload); /* Flag for TR reload */ unsigned long __per_cpu_mca[NR_CPUS]; /* In mca_asm.S */ extern void ia64_os_init_dispatch_monarch (void); extern void ia64_os_init_dispatch_slave (void); static int monarch_cpu = -1; static ia64_mc_info_t ia64_mc_info; #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */ #define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */ #define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */ #define CPE_HISTORY_LENGTH 5 #define CMC_HISTORY_LENGTH 5 #ifdef CONFIG_ACPI static struct timer_list cpe_poll_timer; #endif static struct timer_list cmc_poll_timer; /* * This variable tells whether we are currently in polling mode. * Start with this in the wrong state so we won't play w/ timers * before the system is ready. */ static int cmc_polling_enabled = 1; /* * Clearing this variable prevents CPE polling from getting activated * in mca_late_init. Use it if your system doesn't provide a CPEI, * but encounters problems retrieving CPE logs. This should only be * necessary for debugging. */ static int cpe_poll_enabled = 1; extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe); static int mca_init __initdata; /* * limited & delayed printing support for MCA/INIT handler */ #define mprintk(fmt...) 
ia64_mca_printk(fmt) #define MLOGBUF_SIZE (512+256*NR_CPUS) #define MLOGBUF_MSGMAX 256 static char mlogbuf[MLOGBUF_SIZE]; static DEFINE_SPINLOCK(mlogbuf_wlock); /* mca context only */ static DEFINE_SPINLOCK(mlogbuf_rlock); /* normal context only */ static unsigned long mlogbuf_start; static unsigned long mlogbuf_end; static unsigned int mlogbuf_finished = 0; static unsigned long mlogbuf_timestamp = 0; static int loglevel_save = -1; #define BREAK_LOGLEVEL(__console_loglevel) \ oops_in_progress = 1; \ if (loglevel_save < 0) \ loglevel_save = __console_loglevel; \ __console_loglevel = 15; #define RESTORE_LOGLEVEL(__console_loglevel) \ if (loglevel_save >= 0) { \ __console_loglevel = loglevel_save; \ loglevel_save = -1; \ } \ mlogbuf_finished = 0; \ oops_in_progress = 0; /* * Push messages into buffer, print them later if not urgent. */ void ia64_mca_printk(const char *fmt, ...) { va_list args; int printed_len; char temp_buf[MLOGBUF_MSGMAX]; char *p; va_start(args, fmt); printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args); va_end(args); /* Copy the output into mlogbuf */ if (oops_in_progress) { /* mlogbuf was abandoned, use printk directly instead. */ printk(temp_buf); } else { spin_lock(&mlogbuf_wlock); for (p = temp_buf; *p; p++) { unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE; if (next != mlogbuf_start) { mlogbuf[mlogbuf_end] = *p; mlogbuf_end = next; } else { /* buffer full */ break; } } mlogbuf[mlogbuf_end] = '\0'; spin_unlock(&mlogbuf_wlock); } } EXPORT_SYMBOL(ia64_mca_printk); /* * Print buffered messages. * NOTE: call this after returning normal context. (ex. 
from salinfod) */ void ia64_mlogbuf_dump(void) { char temp_buf[MLOGBUF_MSGMAX]; char *p; unsigned long index; unsigned long flags; unsigned int printed_len; /* Get output from mlogbuf */ while (mlogbuf_start != mlogbuf_end) { temp_buf[0] = '\0'; p = temp_buf; printed_len = 0; spin_lock_irqsave(&mlogbuf_rlock, flags); index = mlogbuf_start; while (index != mlogbuf_end) { *p = mlogbuf[index]; index = (index + 1) % MLOGBUF_SIZE; if (!*p) break; p++; if (++printed_len >= MLOGBUF_MSGMAX - 1) break; } *p = '\0'; if (temp_buf[0]) printk(temp_buf); mlogbuf_start = index; mlogbuf_timestamp = 0; spin_unlock_irqrestore(&mlogbuf_rlock, flags); } } EXPORT_SYMBOL(ia64_mlogbuf_dump); /* * Call this if system is going to down or if immediate flushing messages to * console is required. (ex. recovery was failed, crash dump is going to be * invoked, long-wait rendezvous etc.) * NOTE: this should be called from monarch. */ static void ia64_mlogbuf_finish(int wait) { BREAK_LOGLEVEL(console_loglevel); spin_lock_init(&mlogbuf_rlock); ia64_mlogbuf_dump(); printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, " "MCA/INIT might be dodgy or fail.\n"); if (!wait) return; /* wait for console */ printk("Delaying for 5 seconds...\n"); udelay(5*1000000); mlogbuf_finished = 1; } /* * Print buffered messages from INIT context. */ static void ia64_mlogbuf_dump_from_init(void) { if (mlogbuf_finished) return; if (mlogbuf_timestamp && time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) { printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT " " and the system seems to be messed up.\n"); ia64_mlogbuf_finish(0); return; } if (!spin_trylock(&mlogbuf_rlock)) { printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. 
" "Generated messages other than stack dump will be " "buffered to mlogbuf and will be printed later.\n"); printk(KERN_ERR "INIT: If messages would not printed after " "this INIT, wait 30sec and assert INIT again.\n"); if (!mlogbuf_timestamp) mlogbuf_timestamp = jiffies; return; } spin_unlock(&mlogbuf_rlock); ia64_mlogbuf_dump(); } static void inline ia64_mca_spin(const char *func) { if (monarch_cpu == smp_processor_id()) ia64_mlogbuf_finish(0); mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func); while (1) cpu_relax(); } /* * IA64_MCA log support */ #define IA64_MAX_LOGS 2 /* Double-buffering for nested MCAs */ #define IA64_MAX_LOG_TYPES 4 /* MCA, INIT, CMC, CPE */ typedef struct ia64_state_log_s { spinlock_t isl_lock; int isl_index; unsigned long isl_count; ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */ } ia64_state_log_t; static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES]; #define IA64_LOG_ALLOCATE(it, size) \ {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \ (ia64_err_rec_t *)alloc_bootmem(size); \ ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \ (ia64_err_rec_t *)alloc_bootmem(size);} #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock) #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s) #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s) #define IA64_LOG_NEXT_INDEX(it) ia64_state_log[it].isl_index #define IA64_LOG_CURR_INDEX(it) 1 - ia64_state_log[it].isl_index #define IA64_LOG_INDEX_INC(it) \ {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \ ia64_state_log[it].isl_count++;} #define IA64_LOG_INDEX_DEC(it) \ ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index #define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])) #define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])) #define IA64_LOG_COUNT(it) 
ia64_state_log[it].isl_count

/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs	:	None
 */
static void __init
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *              irq_safe    whether you can use printk at this point
 *  Outputs :   size        (total record length)
 *              *buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t     *log_buffer;
	u64                         total_len = 0;
	unsigned long               s;	/* flags word used implicitly by
					 * IA64_LOG_LOCK/IA64_LOG_UNLOCK */

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	/* On success the next/curr double buffers are swapped, so the record
	 * just fetched becomes the "current" buffer for the caller. */
	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
				       __func__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

/*
 * ia64_mca_log_sal_error_record
 *
 *	This function retrieves a specified error record type from SAL
 *	and wakes up any processes waiting for error records.
 *
 * Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE)
 *              FIXME: remove MCA and irq_safe.
 */
int mca_recover_range(unsigned long addr)
{
	extern struct mca_table_entry __start___mca_table[];
	extern struct mca_table_entry __stop___mca_table[];

	return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
}
EXPORT_SYMBOL_GPL(mca_recover_range);

#ifdef CONFIG_ACPI

int cpe_vector = -1;
int ia64_cpe_irq = -1;

/* Corrected Platform Error interrupt handler.  Logs the CPE record; if CPEs
 * arrive faster than CPE_HISTORY_LENGTH per second, switches this vector off
 * and falls back to timer-driven polling. */
static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
out:
	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	local_irq_disable();

	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */

#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __func__, cpev);
}
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Setup the corrected machine check vector register in the processor.
 *  (The interrupt is masked on boot. ia64_mca_late_init unmask this.)
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *	None
 */
void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
		       __func__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake-up a particular cpu.
 *
 * Inputs  :   cpuid
 * Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wakeup all the slave cpus which have rendez'ed previously.
 *
 * Inputs  :   None
 * Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for_each_online_cpu(cpu) {
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}

}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *	This is handler used to put slave processors into spinloop
 *	while the monarch processor does the mca handling and later
 *	wake each slave up once the monarch is done.  The state
 *	IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed
 *	in SAL.  The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
 *	the cpu has come out of OS rendezvous.
 *
 * Inputs  :   None
 * Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
{
	unsigned long flags;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = NULL, .monarch_cpu = &monarch_cpu };

	/* Mask all interrupts */
	local_irq_save(flags);

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);

	/* Wait for the monarch cpu to exit. */
	while (monarch_cpu != -1)
	       cpu_relax();	/* spin until monarch leaves */

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 * Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *	arg		(Interrupt handler specific argument)
 * Outputs :   None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
{
	return IRQ_HANDLED;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,struct ia64_sal_os_state*)
	= NULL;

/* Install an extra MCA recovery handler; returns 1 if one is already
 * registered (only a single extension is supported). */
int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);


/* Copy one register value from PAL minstate (*fr, with NAT bits packed in
 * fnat) to a pt_regs/switch_stack slot (*tr), transferring the matching NAT
 * bit into the UNAT word *tnat.  The slot numbers are derived from the low
 * bits of each address, per the IA64 UNAT convention. */
static inline void
copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
{
	u64 fslot, tslot, nat;
	*tr = *fr;
	fslot = ((unsigned long)fr >> 3) & 63;
	tslot = ((unsigned long)tr >> 3) & 63;
	*tnat &= ~(1UL << tslot);
	nat = (fnat >> fslot) & 1;
	*tnat |= (nat << tslot);
}

/* Change the comm field on the MCA/INT task to include the pid that
 * was interrupted, it makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */
static void
ia64_mca_modify_comm(const struct task_struct *previous_current)
{
	char *p, comm[sizeof(current->comm)];
	if (previous_current->pid)
		snprintf(comm, sizeof(comm), "%s %d",
			current->comm, previous_current->pid);
	else {
		int l;
		/* only the part of the previous comm before any ' ' suffix */
		if ((p = strchr(previous_current->comm, ' ')))
			l = p - previous_current->comm;
		else
			l = strlen(previous_current->comm);
		snprintf(comm, sizeof(comm), "%s %*s %d",
			current->comm, l, previous_current->comm,
			task_thread_info(previous_current)->cpu);
	}
	memcpy(current->comm, comm, sizeof(current->comm));
}

/* Fill in the parts of a pt_regs that mca_asm.S could not provide, using the
 * PAL minstate saved in sos.  NAT bits for the copied GRs accumulate in *nat. */
static void
finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
		unsigned long *nat)
{
	const pal_min_state_area_t *ms = sos->pal_min_state;
	const u64 *bank;

	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
	 * pmsa_{xip,xpsr,xfs}
	 */
	if (ia64_psr(regs)->ic) {
		regs->cr_iip = ms->pmsa_iip;
		regs->cr_ipsr = ms->pmsa_ipsr;
		regs->cr_ifs = ms->pmsa_ifs;
	} else {
		regs->cr_iip = ms->pmsa_xip;
		regs->cr_ipsr = ms->pmsa_xpsr;
		regs->cr_ifs = ms->pmsa_xfs;

		sos->iip = ms->pmsa_iip;
		sos->ipsr = ms->pmsa_ipsr;
		sos->ifs = ms->pmsa_ifs;
	}
	regs->pr = ms->pmsa_pr;
	regs->b0 = ms->pmsa_br0;
	regs->ar_rsc = ms->pmsa_rsc;
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
	/* r16-r31 come from whichever register bank was active */
	if (ia64_psr(regs)->bn)
		bank = ms->pmsa_bank1_gr;
	else
		bank = ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
}

/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state, it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */
static struct task_struct *
ia64_mca_modify_original_stack(struct pt_regs *regs,
		const struct switch_stack *sw,
		struct ia64_sal_os_state *sos,
		const char *type)
{
	char *p;
	ia64_va va;
	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
	const pal_min_state_area_t *ms = sos->pal_min_state;
	struct task_struct *previous_current;
	struct pt_regs *old_regs;
	struct switch_stack *old_sw;
	unsigned size = sizeof(struct pt_regs) +
		sizeof(struct switch_stack) + 16;
	unsigned long *old_bspstore, *old_bsp;
	unsigned long *new_bspstore, *new_bsp;
	unsigned long old_unat, old_rnat, new_rnat, nat;
	u64 slots, loadrs = regs->loadrs;
	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
	u64 ar_bspstore = regs->ar_bspstore;
	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
	const char *msg;
	int cpu = smp_processor_id();

	previous_current = curr_task(cpu);
	set_curr_task(cpu, current);
	if ((p = strchr(current->comm, ' ')))
		*p = '\0';

	/* Best effort attempt to cope with MCA/INIT delivered while in
	 * physical mode.
	 */
	regs->cr_ipsr = ms->pmsa_ipsr;
	if (ia64_psr(regs)->dt == 0) {
		/* data translation was off: rewrite region-0 addresses into
		 * region 7 (identity-mapped kernel) so they are usable */
		va.l = r12;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r12 = va.l;
		}
		va.l = r13;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r13 = va.l;
		}
	}
	if (ia64_psr(regs)->rt == 0) {
		va.l = ar_bspstore;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bspstore = va.l;
		}
		va.l = ar_bsp;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bsp = va.l;
		}
	}

	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
	 * have been copied to the old stack, the old stack may fail the
	 * validation tests below.  So ia64_old_stack() must restore the dirty
	 * registers from the new stack.  The old and new bspstore probably
	 * have different alignments, so loadrs calculated on the old bsp
	 * cannot be used to restore from the new bsp.  Calculate a suitable
	 * loadrs for the new stack and save it in the new pt_regs, where
	 * ia64_old_stack() can get it.
	 */
	old_bspstore = (unsigned long *)ar_bspstore;
	old_bsp = (unsigned long *)ar_bsp;
	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
	new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;

	/* Verify the previous stack state before we change it */
	if (user_mode(regs)) {
		msg = "occurred in user space";
		/* previous_current is guaranteed to be valid when the task was
		 * in user space, so ...
		 */
		ia64_mca_modify_comm(previous_current);
		goto no_mod;
	}

	if (r13 != sos->prev_IA64_KR_CURRENT) {
		msg = "inconsistent previous current and r13";
		goto no_mod;
	}

	if (!mca_recover_range(ms->pmsa_iip)) {
		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent r12 and r13";
			goto no_mod;
		}
		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bspstore and r13";
			goto no_mod;
		}
		va.p = old_bspstore;
		if (va.f.reg < 5) {
			msg = "old_bspstore is in the wrong region";
			goto no_mod;
		}
		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bsp and r13";
			goto no_mod;
		}
		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
		if (ar_bspstore + size > r12) {
			msg = "no room for blocked state";
			goto no_mod;
		}
	}

	ia64_mca_modify_comm(previous_current);

	/* Make the original task look blocked.  First stack a struct pt_regs,
	 * describing the state at the time of interrupt.  mca_asm.S built a
	 * partial pt_regs, copy it and fill in the blanks using minstate.
	 */
	p = (char *)r12 - sizeof(*regs);
	old_regs = (struct pt_regs *)p;
	memcpy(old_regs, regs, sizeof(*regs));
	old_regs->loadrs = loadrs;
	old_unat = old_regs->ar_unat;
	finish_pt_regs(old_regs, sos, &old_unat);

	/* Next stack a struct switch_stack.  mca_asm.S built a partial
	 * switch_stack, copy it and fill in the blanks using pt_regs and
	 * minstate.
	 *
	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
	 * ar.pfs is set to 0.
	 *
	 * unwind.c::unw_unwind() does special processing for interrupt frames.
	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
	 * switch_stack on the original stack so it will unwind correctly when
	 * unwind.c reads pt_regs.
	 *
	 * thread.ksp is updated to point to the synthesized switch_stack.
	 */
	p -= sizeof(struct switch_stack);
	old_sw = (struct switch_stack *)p;
	memcpy(old_sw, sw, sizeof(*sw));
	old_sw->caller_unat = old_unat;
	old_sw->ar_fpsr = old_regs->ar_fpsr;
	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
	old_sw->b0 = (u64)ia64_leave_kernel;
	old_sw->b1 = ms->pmsa_br1;
	old_sw->ar_pfs = 0;
	old_sw->ar_unat = old_unat;
	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
	previous_current->thread.ksp = (u64)p - 16;

	/* Finally copy the original stack's registers back to its RBS.
	 * Registers from ar.bspstore through ar.bsp at the time of the event
	 * are in the current RBS, copy them back to the original stack.  The
	 * copy must be done register by register because the original bspstore
	 * and the current one have different alignments, so the saved RNAT
	 * data occurs at different places.
	 *
	 * mca_asm does cover, so the old_bsp already includes all registers at
	 * the time of MCA/INIT.  It also does flushrs, so all registers before
	 * this function have been written to backing store on the MCA/INIT
	 * stack.
	 */
	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
	old_rnat = regs->ar_rnat;
	while (slots--) {
		if (ia64_rse_is_rnat_slot(new_bspstore)) {
			new_rnat = ia64_get_rnat(new_bspstore++);
		}
		if (ia64_rse_is_rnat_slot(old_bspstore)) {
			*old_bspstore++ = old_rnat;
			old_rnat = 0;
		}
		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
		*old_bspstore++ = *new_bspstore++;
	}
	old_sw->ar_bspstore = (unsigned long)old_bspstore;
	old_sw->ar_rnat = old_rnat;

	sos->prev_task = previous_current;
	return previous_current;

no_mod:
	mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
			smp_processor_id(), type, msg);
	old_unat = regs->ar_unat;
	finish_pt_regs(regs, sos, &old_unat);
	return previous_current;
}

/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */

static void
ia64_wait_for_slaves(int monarch, const char *type)
{
	int c, i , wait;

	/*
	 * wait 5 seconds total for slaves (arbitrary)
	 */
	for (i = 0; i < 5000; i++) {
		wait = 0;
		for_each_online_cpu(c) {
			if (c == monarch)
				continue;
			if (ia64_mc_info.imi_rendez_checkin[c]
					== IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
				udelay(1000);		/* short wait */
				wait = 1;
				break;
			}
		}
		if (!wait)
			goto all_in;
	}

	/*
	 * Maybe slave(s) dead. Print buffered messages immediately.
*/ ia64_mlogbuf_finish(0); mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type); for_each_online_cpu(c) { if (c == monarch) continue; if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) mprintk(" %d", c); } mprintk("\n"); return; all_in: mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type); return; } /* mca_insert_tr * * Switch rid when TR reload and needed! * iord: 1: itr, 2: itr; * */ static void mca_insert_tr(u64 iord) { int i; u64 old_rr; struct ia64_tr_entry *p; unsigned long psr; int cpu = smp_processor_id(); if (!ia64_idtrs[cpu]) return; psr = ia64_clear_ic(); for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) { p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX; if (p->pte & 0x1) { old_rr = ia64_get_rr(p->ifa); if (old_rr != p->rr) { ia64_set_rr(p->ifa, p->rr); ia64_srlz_d(); } ia64_ptr(iord, p->ifa, p->itir >> 2); ia64_srlz_i(); if (iord & 0x1) { ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2); ia64_srlz_i(); } if (iord & 0x2) { ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2); ia64_srlz_i(); } if (old_rr != p->rr) { ia64_set_rr(p->ifa, old_rr); ia64_srlz_d(); } } } ia64_set_psr(psr); } /* * ia64_mca_handler * * This is uncorrectable machine check handler called from OS_MCA * dispatch code which is in turn called from SAL_CHECK(). * This is the place where the core of OS MCA handling is done. * Right now the logs are extracted and displayed in a well-defined * format. This handler code is supposed to be run only on the * monarch processor. Once the monarch is done with MCA handling * further MCA logging is enabled by clearing logs. * Monarch also has the duty of sending wakeup-IPIs to pull the * slave processors out of rendezvous spinloop. * * If multiple processors call into OS_MCA, the first will become * the monarch. Subsequent cpus will be recorded in the mca_cpu * bitmask. 
 * After the first monarch has processed its MCA, it
 *	will wake up the next cpu in the mca_cpu bitmask and then go
 *	into the rendezvous loop.  When all processors have serviced
 *	their MCA, the last monarch frees up the rest of the processors.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	int recover, cpu = smp_processor_id();
	struct task_struct *previous_current;
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
	static atomic_t mca_count;
	static cpumask_t mca_cpu;

	/* first cpu in becomes the monarch; later arrivals queue in mca_cpu */
	if (atomic_add_return(1, &mca_count) == 1) {
		monarch_cpu = cpu;
		sos->monarch = 1;
	} else {
		cpu_set(cpu, mca_cpu);
		sos->monarch = 0;
	}
	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos,
		"MCA");

	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
	if (sos->monarch) {
		ia64_wait_for_slaves(cpu, "MCA");

		/* Wakeup all the processors which are spinning in the
		 * rendezvous loop.  They will leave SAL, then spin in the OS
		 * with interrupts disabled until this monarch cpu leaves the
		 * MCA handler.  That gets control back to the OS so we can
		 * backtrace the other cpus, backtrace when spinning in SAL
		 * does not work.
		 */
		ia64_mca_wakeup_all();
	} else {
		while (cpu_isset(cpu, mca_cpu))
			cpu_relax();	/* spin until monarch wakes us */
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* MCA error recovery */
	recover = (ia64_mca_ucmc_extension
		&& ia64_mca_ucmc_extension(
			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
			sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	} else {
		/* Dump buffered message to console */
		ia64_mlogbuf_finish(1);
	}

	if (__get_cpu_var(ia64_mca_tr_reload)) {
		mca_insert_tr(0x1); /*Reload dynamic itrs*/
		mca_insert_tr(0x2); /*Reload dynamic dtrs*/
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);

	if (atomic_dec_return(&mca_count) > 0) {
		int i;

		/* wake up the next monarch cpu,
		 * and put this cpu in the rendez loop.
		 */
		for_each_online_cpu(i) {
			if (cpu_isset(i, mca_cpu)) {
				monarch_cpu = i;
				cpu_clear(i, mca_cpu);	/* wake next cpu */
				while (monarch_cpu != -1)
					cpu_relax();	/* spin until last cpu leaves */
				set_curr_task(cpu, previous_current);
				ia64_mc_info.imi_rendez_checkin[cpu]
						= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
				return;
			}
		}
	}
	set_curr_task(cpu, previous_current);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
}

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);

/*
 * ia64_mca_cmc_int_handler
 *
 *  This is corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
* * Inputs * interrupt number * client data arg ptr * * Outputs * None */ static irqreturn_t ia64_mca_cmc_int_handler(int cmc_irq, void *arg) { static unsigned long cmc_history[CMC_HISTORY_LENGTH]; static int index; static DEFINE_SPINLOCK(cmc_history_lock); IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", __func__, cmc_irq, smp_processor_id()); /* SAL spec states this should run w/ interrupts enabled */ local_irq_enable(); spin_lock(&cmc_history_lock); if (!cmc_polling_enabled) { int i, count = 1; /* we know 1 happened now */ unsigned long now = jiffies; for (i = 0; i < CMC_HISTORY_LENGTH; i++) { if (now - cmc_history[i] <= HZ) count++; } IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH); if (count >= CMC_HISTORY_LENGTH) { cmc_polling_enabled = 1; spin_unlock(&cmc_history_lock); /* If we're being hit with CMC interrupts, we won't * ever execute the schedule_work() below. Need to * disable CMC interrupts on this processor now. */ ia64_mca_cmc_vector_disable(NULL); schedule_work(&cmc_disable_work); /* * Corrected errors will still be corrected, but * make sure there's a log somewhere that indicates * something is generating more than we can handle. */ printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n"); mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL); /* lock already released, get out now */ goto out; } else { cmc_history[index++] = now; if (index == CMC_HISTORY_LENGTH) index = 0; } } spin_unlock(&cmc_history_lock); out: /* Get the CMC error record and log it */ ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC); return IRQ_HANDLED; } /* * ia64_mca_cmc_int_caller * * Triggered by sw interrupt from CMC polling routine. Calls * real interrupt handler and either triggers a sw interrupt * on the next cpu or does cleanup at the end. 
* * Inputs * interrupt number * client data arg ptr * Outputs * handled */ static irqreturn_t ia64_mca_cmc_int_caller(int cmc_irq, void *arg) { static int start_count = -1; unsigned int cpuid; cpuid = smp_processor_id(); /* If first cpu, update count */ if (start_count == -1) start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC); ia64_mca_cmc_int_handler(cmc_irq, arg); cpuid = cpumask_next(cpuid+1, cpu_online_mask); if (cpuid < nr_cpu_ids) { platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); } else { /* If no log record, switch out of polling mode */ if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) { printk(KERN_WARNING "Returning to interrupt driven CMC handler\n"); schedule_work(&cmc_enable_work); cmc_polling_enabled = 0; } else { mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL); } start_count = -1; } return IRQ_HANDLED; } /* * ia64_mca_cmc_poll * * Poll for Corrected Machine Checks (CMCs) * * Inputs : dummy(unused) * Outputs : None * */ static void ia64_mca_cmc_poll (unsigned long dummy) { /* Trigger a CMC interrupt cascade */ platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); } /* * ia64_mca_cpe_int_caller * * Triggered by sw interrupt from CPE polling routine. Calls * real interrupt handler and either triggers a sw interrupt * on the next cpu or does cleanup at the end. 
* * Inputs * interrupt number * client data arg ptr * Outputs * handled */ #ifdef CONFIG_ACPI static irqreturn_t ia64_mca_cpe_int_caller(int cpe_irq, void *arg) { static int start_count = -1; static int poll_time = MIN_CPE_POLL_INTERVAL; unsigned int cpuid; cpuid = smp_processor_id(); /* If first cpu, update count */ if (start_count == -1) start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE); ia64_mca_cpe_int_handler(cpe_irq, arg); cpuid = cpumask_next(cpuid+1, cpu_online_mask); if (cpuid < NR_CPUS) { platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); } else { /* * If a log was recorded, increase our polling frequency, * otherwise, backoff or return to interrupt mode. */ if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) { poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2); } else if (cpe_vector < 0) { poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2); } else { poll_time = MIN_CPE_POLL_INTERVAL; printk(KERN_WARNING "Returning to interrupt driven CPE handler\n"); enable_irq(local_vector_to_irq(IA64_CPE_VECTOR)); cpe_poll_enabled = 0; } if (cpe_poll_enabled) mod_timer(&cpe_poll_timer, jiffies + poll_time); start_count = -1; } return IRQ_HANDLED; } /* * ia64_mca_cpe_poll * * Poll for Corrected Platform Errors (CPEs), trigger interrupt * on first cpu, from there it will trickle through all the cpus. * * Inputs : dummy(unused) * Outputs : None * */ static void ia64_mca_cpe_poll (unsigned long dummy) { /* Trigger a CPE interrupt cascade */ platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); } #endif /* CONFIG_ACPI */ static int default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data) { int c; struct task_struct *g, *t; if (val != DIE_INIT_MONARCH_PROCESS) return NOTIFY_DONE; #ifdef CONFIG_KEXEC if (atomic_read(&kdump_in_progress)) return NOTIFY_DONE; #endif /* * FIXME: mlogbuf will brim over with INIT stack dumps. 
* To enable show_stack from INIT, we use oops_in_progress which should * be used in real oops. This would cause something wrong after INIT. */ BREAK_LOGLEVEL(console_loglevel); ia64_mlogbuf_dump_from_init(); printk(KERN_ERR "Processes interrupted by INIT -"); for_each_online_cpu(c) { struct ia64_sal_os_state *s; t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET); s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET); g = s->prev_task; if (g) { if (g->pid) printk(" %d", g->pid); else printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g); } } printk("\n\n"); if (read_trylock(&tasklist_lock)) { do_each_thread (g, t) { printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); show_stack(t, NULL); } while_each_thread (g, t); read_unlock(&tasklist_lock); } /* FIXME: This will not restore zapped printk locks. */ RESTORE_LOGLEVEL(console_loglevel); return NOTIFY_DONE; } /* * C portion of the OS INIT handler * * Called from ia64_os_init_dispatch * * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for * this event. This code is used for both monarch and slave INIT events, see * sos->monarch. * * All INIT events switch to the INIT stack and change the previous process to * blocked status. If one of the INIT events is the monarch then we are * probably processing the nmi button/command. Use the monarch cpu to dump all * the processes. The slave INIT events all spin until the monarch cpu * returns. We can also get INIT slave events for MCA, in which case the MCA * process is the monarch. */ void ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, struct ia64_sal_os_state *sos) { static atomic_t slaves; static atomic_t monarchs; struct task_struct *previous_current; int cpu = smp_processor_id(); struct ia64_mca_notify_die nd = { .sos = sos, .monarch_cpu = &monarch_cpu }; NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0); mprintk(KERN_INFO "Entered OS INIT handler. 
PSP=%lx cpu=%d monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch); salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0); previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT"); sos->os_status = IA64_INIT_RESUME; /* FIXME: Workaround for broken proms that drive all INIT events as * slaves. The last slave that enters is promoted to be a monarch. * Remove this code in September 2006, that gives platforms a year to * fix their proms and get their customers updated. */ if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", __func__, cpu); atomic_dec(&slaves); sos->monarch = 1; } /* FIXME: Workaround for broken proms that drive all INIT events as * monarchs. Second and subsequent monarchs are demoted to slaves. * Remove this code in September 2006, that gives platforms a year to * fix their proms and get their customers updated. */ if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", __func__, cpu); atomic_dec(&monarchs); sos->monarch = 0; } if (!sos->monarch) { ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; #ifdef CONFIG_KEXEC while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress)) udelay(1000); #else while (monarch_cpu == -1) cpu_relax(); /* spin until monarch enters */ #endif NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1); NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1); #ifdef CONFIG_KEXEC while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress)) udelay(1000); #else while (monarch_cpu != -1) cpu_relax(); /* spin until monarch leaves */ #endif NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1); mprintk("Slave on cpu %d returning to normal service.\n", cpu); set_curr_task(cpu, previous_current); ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; atomic_dec(&slaves); return; } monarch_cpu = cpu; NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, 
(long)&nd, 1); /* * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be * generated via the BMC's command-line interface, but since the console is on the * same serial line, the user will need some time to switch out of the BMC before * the dump begins. */ mprintk("Delaying for 5 seconds...\n"); udelay(5*1000000); ia64_wait_for_slaves(cpu, "INIT"); /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through * to default_monarch_init_process() above and just print all the * tasks. */ NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1); NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1); mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); atomic_dec(&monarchs); set_curr_task(cpu, previous_current); monarch_cpu = -1; return; } static int __init ia64_mca_disable_cpe_polling(char *str) { cpe_poll_enabled = 0; return 1; } __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling); static struct irqaction cmci_irqaction = { .handler = ia64_mca_cmc_int_handler, .flags = IRQF_DISABLED, .name = "cmc_hndlr" }; static struct irqaction cmcp_irqaction = { .handler = ia64_mca_cmc_int_caller, .flags = IRQF_DISABLED, .name = "cmc_poll" }; static struct irqaction mca_rdzv_irqaction = { .handler = ia64_mca_rendez_int_handler, .flags = IRQF_DISABLED, .name = "mca_rdzv" }; static struct irqaction mca_wkup_irqaction = { .handler = ia64_mca_wakeup_int_handler, .flags = IRQF_DISABLED, .name = "mca_wkup" }; #ifdef CONFIG_ACPI static struct irqaction mca_cpe_irqaction = { .handler = ia64_mca_cpe_int_handler, .flags = IRQF_DISABLED, .name = "cpe_hndlr" }; static struct irqaction mca_cpep_irqaction = { .handler = ia64_mca_cpe_int_caller, .flags = IRQF_DISABLED, .name = "cpe_poll" }; #endif /* CONFIG_ACPI */ /* Minimal format of the MCA/INIT stacks. The pseudo processes that run on * these stacks can never sleep, they cannot return from the kernel to user * space, they do not appear in a normal ps listing. 
So there is no need to * format most of the fields. */ static void __cpuinit format_mca_init_stack(void *mca_data, unsigned long offset, const char *type, int cpu) { struct task_struct *p = (struct task_struct *)((char *)mca_data + offset); struct thread_info *ti; memset(p, 0, KERNEL_STACK_SIZE); ti = task_thread_info(p); ti->flags = _TIF_MCA_INIT; ti->preempt_count = 1; ti->task = p; ti->cpu = cpu; p->stack = ti; p->state = TASK_UNINTERRUPTIBLE; cpu_set(cpu, p->cpus_allowed); INIT_LIST_HEAD(&p->tasks); p->parent = p->real_parent = p->group_leader = p; INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); strncpy(p->comm, type, sizeof(p->comm)-1); } /* Caller prevents this from being called after init */ static void * __init_refok mca_bootmem(void) { return __alloc_bootmem(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE, 0); } /* Do per-CPU MCA-related initialization. */ void __cpuinit ia64_mca_cpu_init(void *cpu_data) { void *pal_vaddr; void *data; long sz = sizeof(struct ia64_mca_cpu); int cpu = smp_processor_id(); static int first_time = 1; /* * Structure will already be allocated if cpu has been online, * then offlined. */ if (__per_cpu_mca[cpu]) { data = __va(__per_cpu_mca[cpu]); } else { if (first_time) { data = mca_bootmem(); first_time = 0; } else data = (void *)__get_free_pages(GFP_KERNEL, get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", cpu); } format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack), "MCA", cpu); format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack), "INIT", cpu); __get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data); /* * Stash away a copy of the PTE needed to map the per-CPU page. * We may need it during MCA recovery. */ __get_cpu_var(ia64_mca_per_cpu_pte) = pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); /* * Also, stash away a copy of the PAL address and the PTE * needed to map it. 
*/ pal_vaddr = efi_get_pal_addr(); if (!pal_vaddr) return; __get_cpu_var(ia64_mca_pal_base) = GRANULEROUNDDOWN((unsigned long) pal_vaddr); __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr), PAGE_KERNEL)); } static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy) { unsigned long flags; local_irq_save(flags); if (!cmc_polling_enabled) ia64_mca_cmc_vector_enable(NULL); local_irq_restore(flags); } static int __cpuinit mca_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int hotcpu = (unsigned long) hcpu; switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust, NULL, 0); break; } return NOTIFY_OK; } static struct notifier_block mca_cpu_notifier __cpuinitdata = { .notifier_call = mca_cpu_callback }; /* * ia64_mca_init * * Do all the system level mca specific initialization. * * 1. Register spinloop and wakeup request interrupt vectors * * 2. Register OS_MCA handler entry point * * 3. Register OS_INIT handler entry point * * 4. Initialize MCA/CMC/INIT related log buffers maintained by the OS. * * Note that this initialization is done very early before some kernel * services are available. 
* * Inputs : None * * Outputs : None */ void __init ia64_mca_init(void) { ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch; ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave; ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; int i; long rc; struct ia64_sal_retval isrv; unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */ static struct notifier_block default_init_monarch_nb = { .notifier_call = default_monarch_init_process, .priority = 0/* we need to notified last */ }; IA64_MCA_DEBUG("%s: begin\n", __func__); /* Clear the Rendez checkin flag for all cpus */ for(i = 0 ; i < NR_CPUS; i++) ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; /* * Register the rendezvous spinloop and wakeup mechanism with SAL */ /* Register the rendezvous interrupt vector with SAL */ while (1) { isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT, SAL_MC_PARAM_MECHANISM_INT, IA64_MCA_RENDEZ_VECTOR, timeout, SAL_MC_PARAM_RZ_ALWAYS); rc = isrv.status; if (rc == 0) break; if (rc == -2) { printk(KERN_INFO "Increasing MCA rendezvous timeout from " "%ld to %ld milliseconds\n", timeout, isrv.v0); timeout = isrv.v0; NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0); continue; } printk(KERN_ERR "Failed to register rendezvous interrupt " "with SAL (status %ld)\n", rc); return; } /* Register the wakeup interrupt vector with SAL */ isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP, SAL_MC_PARAM_MECHANISM_INT, IA64_MCA_WAKEUP_VECTOR, 0, 0); rc = isrv.status; if (rc) { printk(KERN_ERR "Failed to register wakeup interrupt with SAL " "(status %ld)\n", rc); return; } IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__); ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp); /* * XXX - disable SAL checksum by setting size to 0; should be * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch); */ ia64_mc_info.imi_mca_handler_size 
= 0; /* Register the os mca handler with SAL */ if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp), ia64_mc_info.imi_mca_handler_size, 0, 0, 0))) { printk(KERN_ERR "Failed to register OS MCA handler with SAL " "(status %ld)\n", rc); return; } IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__, ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp)); /* * XXX - disable SAL checksum by setting size to 0, should be * size of the actual init handler in mca_asm.S. */ ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp); ia64_mc_info.imi_monarch_init_handler_size = 0; ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp); ia64_mc_info.imi_slave_init_handler_size = 0; IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__, ia64_mc_info.imi_monarch_init_handler); /* Register the os init handler with SAL */ if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, ia64_mc_info.imi_monarch_init_handler, ia64_tpa(ia64_getreg(_IA64_REG_GP)), ia64_mc_info.imi_monarch_init_handler_size, ia64_mc_info.imi_slave_init_handler, ia64_tpa(ia64_getreg(_IA64_REG_GP)), ia64_mc_info.imi_slave_init_handler_size))) { printk(KERN_ERR "Failed to register m/s INIT handlers with SAL " "(status %ld)\n", rc); return; } if (register_die_notifier(&default_init_monarch_nb)) { printk(KERN_ERR "Failed to register default monarch INIT process\n"); return; } IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__); /* Initialize the areas set aside by the OS to buffer the * platform/processor error states for MCA/INIT/CMC * handling. */ ia64_log_init(SAL_INFO_TYPE_MCA); ia64_log_init(SAL_INFO_TYPE_INIT); ia64_log_init(SAL_INFO_TYPE_CMC); ia64_log_init(SAL_INFO_TYPE_CPE); mca_init = 1; printk(KERN_INFO "MCA related initialization done\n"); } /* * ia64_mca_late_init * * Opportunity to setup things that require initialization later * than ia64_mca_init. 
Setup a timer to poll for CPEs if the * platform doesn't support an interrupt driven mechanism. * * Inputs : None * Outputs : Status */ static int __init ia64_mca_late_init(void) { if (!mca_init) return 0; /* * Configure the CMCI/P vector and handler. Interrupts for CMC are * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). */ register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction); register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction); ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */ /* Setup the MCA rendezvous interrupt vector */ register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction); /* Setup the MCA wakeup interrupt vector */ register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction); #ifdef CONFIG_ACPI /* Setup the CPEI/P handler */ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); #endif register_hotcpu_notifier(&mca_cpu_notifier); /* Setup the CMCI/P vector and handler */ init_timer(&cmc_poll_timer); cmc_poll_timer.function = ia64_mca_cmc_poll; /* Unmask/enable the vector */ cmc_polling_enabled = 0; schedule_work(&cmc_enable_work); IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__); #ifdef CONFIG_ACPI /* Setup the CPEI/P vector and handler */ cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI); init_timer(&cpe_poll_timer); cpe_poll_timer.function = ia64_mca_cpe_poll; { unsigned int irq; if (cpe_vector >= 0) { /* If platform supports CPEI, enable the irq. */ irq = local_vector_to_irq(cpe_vector); if (irq > 0) { cpe_poll_enabled = 0; irq_set_status_flags(irq, IRQ_PER_CPU); setup_irq(irq, &mca_cpe_irqaction); ia64_cpe_irq = irq; ia64_mca_register_cpev(cpe_vector); IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __func__); return 0; } printk(KERN_ERR "%s: Failed to find irq for CPE " "interrupt handler, vector %d\n", __func__, cpe_vector); } /* If platform doesn't support CPEI, get the timer going. 
*/ if (cpe_poll_enabled) { ia64_mca_cpe_poll(0UL); IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__); } } #endif return 0; } device_initcall(ia64_mca_late_init);
gpl-2.0
utilite-computer/linux-kernel
drivers/input/keyboard/xtkbd.c
2652
4599
/* * Copyright (c) 1999-2001 Vojtech Pavlik */ /* * XT keyboard driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/slab.h> #include <linux/module.h> #include <linux/input.h> #include <linux/init.h> #include <linux/serio.h> #define DRIVER_DESC "XT keyboard driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #define XTKBD_EMUL0 0xe0 #define XTKBD_EMUL1 0xe1 #define XTKBD_KEY 0x7f #define XTKBD_RELEASE 0x80 static unsigned char xtkbd_keycode[256] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 0, 0, 0, 87, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 87, 88, 0, 0, 0, 0,110,111,103,108,105, 106 }; struct xtkbd { unsigned char keycode[256]; struct input_dev *dev; struct serio *serio; char phys[32]; }; static irqreturn_t xtkbd_interrupt(struct serio *serio, 
unsigned char data, unsigned int flags) { struct xtkbd *xtkbd = serio_get_drvdata(serio); switch (data) { case XTKBD_EMUL0: case XTKBD_EMUL1: break; default: if (xtkbd->keycode[data & XTKBD_KEY]) { input_report_key(xtkbd->dev, xtkbd->keycode[data & XTKBD_KEY], !(data & XTKBD_RELEASE)); input_sync(xtkbd->dev); } else { printk(KERN_WARNING "xtkbd.c: Unknown key (scancode %#x) %s.\n", data & XTKBD_KEY, data & XTKBD_RELEASE ? "released" : "pressed"); } } return IRQ_HANDLED; } static int xtkbd_connect(struct serio *serio, struct serio_driver *drv) { struct xtkbd *xtkbd; struct input_dev *input_dev; int err = -ENOMEM; int i; xtkbd = kmalloc(sizeof(struct xtkbd), GFP_KERNEL); input_dev = input_allocate_device(); if (!xtkbd || !input_dev) goto fail1; xtkbd->serio = serio; xtkbd->dev = input_dev; snprintf(xtkbd->phys, sizeof(xtkbd->phys), "%s/input0", serio->phys); memcpy(xtkbd->keycode, xtkbd_keycode, sizeof(xtkbd->keycode)); input_dev->name = "XT Keyboard"; input_dev->phys = xtkbd->phys; input_dev->id.bustype = BUS_XTKBD; input_dev->id.vendor = 0x0001; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); input_dev->keycode = xtkbd->keycode; input_dev->keycodesize = sizeof(unsigned char); input_dev->keycodemax = ARRAY_SIZE(xtkbd_keycode); for (i = 0; i < 255; i++) set_bit(xtkbd->keycode[i], input_dev->keybit); clear_bit(0, input_dev->keybit); serio_set_drvdata(serio, xtkbd); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(xtkbd->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(xtkbd); return err; } static void xtkbd_disconnect(struct serio *serio) { struct xtkbd *xtkbd = serio_get_drvdata(serio); serio_close(serio); serio_set_drvdata(serio, NULL); input_unregister_device(xtkbd->dev); kfree(xtkbd); } static struct serio_device_id 
xtkbd_serio_ids[] = { { .type = SERIO_XT, .proto = SERIO_ANY, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, xtkbd_serio_ids); static struct serio_driver xtkbd_drv = { .driver = { .name = "xtkbd", }, .description = DRIVER_DESC, .id_table = xtkbd_serio_ids, .interrupt = xtkbd_interrupt, .connect = xtkbd_connect, .disconnect = xtkbd_disconnect, }; module_serio_driver(xtkbd_drv);
gpl-2.0
ManishBadarkhe/linux-next
drivers/input/joystick/spaceorb.c
2652
6490
/* * Copyright (c) 1999-2001 Vojtech Pavlik * * Based on the work of: * David Thompson */ /* * SpaceTec SpaceOrb 360 and Avenger 6dof controller driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/serio.h> #define DRIVER_DESC "SpaceTec SpaceOrb 360 and Avenger 6dof controller driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Constants. */ #define SPACEORB_MAX_LENGTH 64 static int spaceorb_buttons[] = { BTN_TL, BTN_TR, BTN_Y, BTN_X, BTN_B, BTN_A }; static int spaceorb_axes[] = { ABS_X, ABS_Y, ABS_Z, ABS_RX, ABS_RY, ABS_RZ }; /* * Per-Orb data. 
*/ struct spaceorb { struct input_dev *dev; int idx; unsigned char data[SPACEORB_MAX_LENGTH]; char phys[32]; }; static unsigned char spaceorb_xor[] = "SpaceWare"; static unsigned char *spaceorb_errors[] = { "EEPROM storing 0 failed", "Receive queue overflow", "Transmit queue timeout", "Bad packet", "Power brown-out", "EEPROM checksum error", "Hardware fault" }; /* * spaceorb_process_packet() decodes packets the driver receives from the * SpaceOrb. */ static void spaceorb_process_packet(struct spaceorb *spaceorb) { struct input_dev *dev = spaceorb->dev; unsigned char *data = spaceorb->data; unsigned char c = 0; int axes[6]; int i; if (spaceorb->idx < 2) return; for (i = 0; i < spaceorb->idx; i++) c ^= data[i]; if (c) return; switch (data[0]) { case 'R': /* Reset packet */ spaceorb->data[spaceorb->idx - 1] = 0; for (i = 1; i < spaceorb->idx && spaceorb->data[i] == ' '; i++); printk(KERN_INFO "input: %s [%s] is %s\n", dev->name, spaceorb->data + i, spaceorb->phys); break; case 'D': /* Ball + button data */ if (spaceorb->idx != 12) return; for (i = 0; i < 9; i++) spaceorb->data[i+2] ^= spaceorb_xor[i]; axes[0] = ( data[2] << 3) | (data[ 3] >> 4); axes[1] = ((data[3] & 0x0f) << 6) | (data[ 4] >> 1); axes[2] = ((data[4] & 0x01) << 9) | (data[ 5] << 2) | (data[4] >> 5); axes[3] = ((data[6] & 0x1f) << 5) | (data[ 7] >> 2); axes[4] = ((data[7] & 0x03) << 8) | (data[ 8] << 1) | (data[7] >> 6); axes[5] = ((data[9] & 0x3f) << 4) | (data[10] >> 3); for (i = 0; i < 6; i++) input_report_abs(dev, spaceorb_axes[i], axes[i] - ((axes[i] & 0x200) ? 1024 : 0)); for (i = 0; i < 6; i++) input_report_key(dev, spaceorb_buttons[i], (data[1] >> i) & 1); break; case 'K': /* Button data */ if (spaceorb->idx != 5) return; for (i = 0; i < 6; i++) input_report_key(dev, spaceorb_buttons[i], (data[2] >> i) & 1); break; case 'E': /* Error packet */ if (spaceorb->idx != 4) return; printk(KERN_ERR "spaceorb: Device error. 
[ "); for (i = 0; i < 7; i++) if (data[1] & (1 << i)) printk("%s ", spaceorb_errors[i]); printk("]\n"); break; } input_sync(dev); } static irqreturn_t spaceorb_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct spaceorb* spaceorb = serio_get_drvdata(serio); if (~data & 0x80) { if (spaceorb->idx) spaceorb_process_packet(spaceorb); spaceorb->idx = 0; } if (spaceorb->idx < SPACEORB_MAX_LENGTH) spaceorb->data[spaceorb->idx++] = data & 0x7f; return IRQ_HANDLED; } /* * spaceorb_disconnect() is the opposite of spaceorb_connect() */ static void spaceorb_disconnect(struct serio *serio) { struct spaceorb* spaceorb = serio_get_drvdata(serio); serio_close(serio); serio_set_drvdata(serio, NULL); input_unregister_device(spaceorb->dev); kfree(spaceorb); } /* * spaceorb_connect() is the routine that is called when someone adds a * new serio device that supports SpaceOrb/Avenger protocol and registers * it as an input device. */ static int spaceorb_connect(struct serio *serio, struct serio_driver *drv) { struct spaceorb *spaceorb; struct input_dev *input_dev; int err = -ENOMEM; int i; spaceorb = kzalloc(sizeof(struct spaceorb), GFP_KERNEL); input_dev = input_allocate_device(); if (!spaceorb || !input_dev) goto fail1; spaceorb->dev = input_dev; snprintf(spaceorb->phys, sizeof(spaceorb->phys), "%s/input0", serio->phys); input_dev->name = "SpaceTec SpaceOrb 360 / Avenger"; input_dev->phys = spaceorb->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_SPACEORB; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); for (i = 0; i < 6; i++) set_bit(spaceorb_buttons[i], input_dev->keybit); for (i = 0; i < 6; i++) input_set_abs_params(input_dev, spaceorb_axes[i], -508, 508, 0, 0); serio_set_drvdata(serio, spaceorb); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(spaceorb->dev); if (err) goto fail3; return 0; 
fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(spaceorb); return err; } /* * The serio driver structure. */ static struct serio_device_id spaceorb_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_SPACEORB, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, spaceorb_serio_ids); static struct serio_driver spaceorb_drv = { .driver = { .name = "spaceorb", }, .description = DRIVER_DESC, .id_table = spaceorb_serio_ids, .interrupt = spaceorb_interrupt, .connect = spaceorb_connect, .disconnect = spaceorb_disconnect, }; module_serio_driver(spaceorb_drv);
gpl-2.0
aatjitra/PR26
drivers/media/radio/radio-aimslab.c
2652
10995
/* radiotrack (radioreveal) driver for Linux radio support * (c) 1997 M. Kirkwood * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk> * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * * History: * 1999-02-24 Russell Kroll <rkroll@exploits.org> * Fine tuning/VIDEO_TUNER_LOW * Frequency range expanded to start at 87 MHz * * TODO: Allow for more than one of these foolish entities :-) * * Notes on the hardware (reverse engineered from other peoples' * reverse engineering of AIMS' code :-) * * Frequency control is done digitally -- ie out(port,encodefreq(95.8)); * * The signal strength query is unsurprisingly inaccurate. And it seems * to indicate that (on my card, at least) the frequency setting isn't * too great. (I have to tune up .025MHz from what the freq should be * to get a report that the thing is tuned.) * * Volume control is (ugh) analogue: * out(port, start_increasing_volume); * wait(a_wee_while); * out(port, stop_changing_the_volume); * */ #include <linux/module.h> /* Modules */ #include <linux/init.h> /* Initdata */ #include <linux/ioport.h> /* request_region */ #include <linux/delay.h> /* msleep */ #include <linux/videodev2.h> /* kernel radio structs */ #include <linux/version.h> /* for KERNEL_VERSION MACRO */ #include <linux/io.h> /* outb, outb_p */ #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> MODULE_AUTHOR("M.Kirkwood"); MODULE_DESCRIPTION("A driver for the RadioTrack/RadioReveal radio card."); MODULE_LICENSE("GPL"); #ifndef CONFIG_RADIO_RTRACK_PORT #define CONFIG_RADIO_RTRACK_PORT -1 #endif static int io = CONFIG_RADIO_RTRACK_PORT; static int radio_nr = -1; module_param(io, int, 0); MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20f or 0x30f)"); module_param(radio_nr, int, 0); #define RADIO_VERSION KERNEL_VERSION(0, 0, 2) struct rtrack { struct v4l2_device v4l2_dev; struct video_device vdev; int port; int curvol; 
unsigned long curfreq; int muted; int io; struct mutex lock; }; static struct rtrack rtrack_card; /* local things */ static void rt_decvol(struct rtrack *rt) { outb(0x58, rt->io); /* volume down + sigstr + on */ msleep(100); outb(0xd8, rt->io); /* volume steady + sigstr + on */ } static void rt_incvol(struct rtrack *rt) { outb(0x98, rt->io); /* volume up + sigstr + on */ msleep(100); outb(0xd8, rt->io); /* volume steady + sigstr + on */ } static void rt_mute(struct rtrack *rt) { rt->muted = 1; mutex_lock(&rt->lock); outb(0xd0, rt->io); /* volume steady, off */ mutex_unlock(&rt->lock); } static int rt_setvol(struct rtrack *rt, int vol) { int i; mutex_lock(&rt->lock); if (vol == rt->curvol) { /* requested volume = current */ if (rt->muted) { /* user is unmuting the card */ rt->muted = 0; outb(0xd8, rt->io); /* enable card */ } mutex_unlock(&rt->lock); return 0; } if (vol == 0) { /* volume = 0 means mute the card */ outb(0x48, rt->io); /* volume down but still "on" */ msleep(2000); /* make sure it's totally down */ outb(0xd0, rt->io); /* volume steady, off */ rt->curvol = 0; /* track the volume state! 
*/ mutex_unlock(&rt->lock); return 0; } rt->muted = 0; if (vol > rt->curvol) for (i = rt->curvol; i < vol; i++) rt_incvol(rt); else for (i = rt->curvol; i > vol; i--) rt_decvol(rt); rt->curvol = vol; mutex_unlock(&rt->lock); return 0; } /* the 128+64 on these outb's is to keep the volume stable while tuning * without them, the volume _will_ creep up with each frequency change * and bit 4 (+16) is to keep the signal strength meter enabled */ static void send_0_byte(struct rtrack *rt) { if (rt->curvol == 0 || rt->muted) { outb_p(128+64+16+ 1, rt->io); /* wr-enable + data low */ outb_p(128+64+16+2+1, rt->io); /* clock */ } else { outb_p(128+64+16+8+ 1, rt->io); /* on + wr-enable + data low */ outb_p(128+64+16+8+2+1, rt->io); /* clock */ } msleep(1); } static void send_1_byte(struct rtrack *rt) { if (rt->curvol == 0 || rt->muted) { outb_p(128+64+16+4 +1, rt->io); /* wr-enable+data high */ outb_p(128+64+16+4+2+1, rt->io); /* clock */ } else { outb_p(128+64+16+8+4 +1, rt->io); /* on+wr-enable+data high */ outb_p(128+64+16+8+4+2+1, rt->io); /* clock */ } msleep(1); } static int rt_setfreq(struct rtrack *rt, unsigned long freq) { int i; mutex_lock(&rt->lock); /* Stop other ops interfering */ rt->curfreq = freq; /* now uses VIDEO_TUNER_LOW for fine tuning */ freq += 171200; /* Add 10.7 MHz IF */ freq /= 800; /* Convert to 50 kHz units */ send_0_byte(rt); /* 0: LSB of frequency */ for (i = 0; i < 13; i++) /* : frequency bits (1-13) */ if (freq & (1 << i)) send_1_byte(rt); else send_0_byte(rt); send_0_byte(rt); /* 14: test bit - always 0 */ send_0_byte(rt); /* 15: test bit - always 0 */ send_0_byte(rt); /* 16: band data 0 - always 0 */ send_0_byte(rt); /* 17: band data 1 - always 0 */ send_0_byte(rt); /* 18: band data 2 - always 0 */ send_0_byte(rt); /* 19: time base - always 0 */ send_0_byte(rt); /* 20: spacing (0 = 25 kHz) */ send_1_byte(rt); /* 21: spacing (1 = 25 kHz) */ send_0_byte(rt); /* 22: spacing (0 = 25 kHz) */ send_1_byte(rt); /* 23: AM/FM (FM = 1, always) */ if 
(rt->curvol == 0 || rt->muted) outb(0xd0, rt->io); /* volume steady + sigstr */ else outb(0xd8, rt->io); /* volume steady + sigstr + on */ mutex_unlock(&rt->lock); return 0; } static int rt_getsigstr(struct rtrack *rt) { int sig = 1; mutex_lock(&rt->lock); if (inb(rt->io) & 2) /* bit set = no signal present */ sig = 0; mutex_unlock(&rt->lock); return sig; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *v) { strlcpy(v->driver, "radio-aimslab", sizeof(v->driver)); strlcpy(v->card, "RadioTrack", sizeof(v->card)); strlcpy(v->bus_info, "ISA", sizeof(v->bus_info)); v->version = RADIO_VERSION; v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; return 0; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { struct rtrack *rt = video_drvdata(file); if (v->index > 0) return -EINVAL; strlcpy(v->name, "FM", sizeof(v->name)); v->type = V4L2_TUNER_RADIO; v->rangelow = 87 * 16000; v->rangehigh = 108 * 16000; v->rxsubchans = V4L2_TUNER_SUB_MONO; v->capability = V4L2_TUNER_CAP_LOW; v->audmode = V4L2_TUNER_MODE_MONO; v->signal = 0xffff * rt_getsigstr(rt); return 0; } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { return v->index ? 
-EINVAL : 0; } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct rtrack *rt = video_drvdata(file); if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) return -EINVAL; rt_setfreq(rt, f->frequency); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct rtrack *rt = video_drvdata(file); if (f->tuner != 0) return -EINVAL; f->type = V4L2_TUNER_RADIO; f->frequency = rt->curfreq; return 0; } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { switch (qc->id) { case V4L2_CID_AUDIO_MUTE: return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1); case V4L2_CID_AUDIO_VOLUME: return v4l2_ctrl_query_fill(qc, 0, 0xff, 1, 0xff); } return -EINVAL; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct rtrack *rt = video_drvdata(file); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: ctrl->value = rt->muted; return 0; case V4L2_CID_AUDIO_VOLUME: ctrl->value = rt->curvol; return 0; } return -EINVAL; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct rtrack *rt = video_drvdata(file); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: if (ctrl->value) rt_mute(rt); else rt_setvol(rt, rt->curvol); return 0; case V4L2_CID_AUDIO_VOLUME: rt_setvol(rt, ctrl->value); return 0; } return -EINVAL; } static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i) { *i = 0; return 0; } static int vidioc_s_input(struct file *filp, void *priv, unsigned int i) { return i ? -EINVAL : 0; } static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { a->index = 0; strlcpy(a->name, "Radio", sizeof(a->name)); a->capability = V4L2_AUDCAP_STEREO; return 0; } static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *a) { return a->index ? 
-EINVAL : 0; } static const struct v4l2_file_operations rtrack_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops rtrack_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, }; static int __init rtrack_init(void) { struct rtrack *rt = &rtrack_card; struct v4l2_device *v4l2_dev = &rt->v4l2_dev; int res; strlcpy(v4l2_dev->name, "rtrack", sizeof(v4l2_dev->name)); rt->io = io; if (rt->io == -1) { v4l2_err(v4l2_dev, "you must set an I/O address with io=0x20f or 0x30f\n"); return -EINVAL; } if (!request_region(rt->io, 2, "rtrack")) { v4l2_err(v4l2_dev, "port 0x%x already in use\n", rt->io); return -EBUSY; } res = v4l2_device_register(NULL, v4l2_dev); if (res < 0) { release_region(rt->io, 2); v4l2_err(v4l2_dev, "could not register v4l2_device\n"); return res; } strlcpy(rt->vdev.name, v4l2_dev->name, sizeof(rt->vdev.name)); rt->vdev.v4l2_dev = v4l2_dev; rt->vdev.fops = &rtrack_fops; rt->vdev.ioctl_ops = &rtrack_ioctl_ops; rt->vdev.release = video_device_release_empty; video_set_drvdata(&rt->vdev, rt); /* Set up the I/O locking */ mutex_init(&rt->lock); /* mute card - prevents noisy bootups */ /* this ensures that the volume is all the way down */ outb(0x48, rt->io); /* volume down but still "on" */ msleep(2000); /* make sure it's totally down */ outb(0xc0, rt->io); /* steady volume, mute card */ if (video_register_device(&rt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { v4l2_device_unregister(&rt->v4l2_dev); release_region(rt->io, 2); return -EINVAL; } v4l2_info(v4l2_dev, "AIMSlab RadioTrack/RadioReveal card driver.\n"); return 0; } static void 
__exit rtrack_exit(void) { struct rtrack *rt = &rtrack_card; video_unregister_device(&rt->vdev); v4l2_device_unregister(&rt->v4l2_dev); release_region(rt->io, 2); } module_init(rtrack_init); module_exit(rtrack_exit);
gpl-2.0
ysat0/linux-ysato
drivers/gpu/drm/i915/intel_modes.c
3932
3776
/*
 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
 * Copyright (c) 2007, 2010 Intel Corporation
 *   Jesse Barnes <jesse.barnes@intel.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"

/**
 * intel_connector_update_modes - update connector from edid
 * @connector: DRM connector device to use
 * @edid: previously read EDID information
 *
 * Updates the connector's EDID property, adds the probed modes and
 * refreshes the cached ELD. Returns the number of modes added.
 */
int intel_connector_update_modes(struct drm_connector *connector,
				struct edid *edid)
{
	int ret;

	drm_mode_connector_update_edid_property(connector, edid);
	ret = drm_add_edid_modes(connector, edid);
	drm_edid_to_eld(connector, edid);

	return ret;
}

/**
 * intel_ddc_get_modes - get modelist from monitor
 * @connector: DRM connector device to use
 * @adapter: i2c adapter
 *
 * Fetch the EDID information from @connector using the DDC bus.
 * Returns the number of modes added, or 0 if no EDID was read.
 */
int intel_ddc_get_modes(struct drm_connector *connector,
			struct i2c_adapter *adapter)
{
	struct edid *edid;
	int ret;

	edid = drm_get_edid(connector, adapter);
	if (!edid)
		return 0;

	ret = intel_connector_update_modes(connector, edid);
	kfree(edid);	/* drm_get_edid() allocates; we own the buffer */

	return ret;
}

static const struct drm_prop_enum_list force_audio_names[] = {
	{ HDMI_AUDIO_OFF_DVI, "force-dvi" },
	{ HDMI_AUDIO_OFF, "off" },
	{ HDMI_AUDIO_AUTO, "auto" },
	{ HDMI_AUDIO_ON, "on" },
};

/*
 * Attach the (lazily created, device-wide) "audio" enum property to
 * @connector, defaulting to HDMI_AUDIO_AUTO (0).
 */
void
intel_attach_force_audio_property(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_property *prop;

	prop = dev_priv->force_audio_property;
	if (prop == NULL) {
		prop = drm_property_create_enum(dev, 0,
					   "audio",
					   force_audio_names,
					   ARRAY_SIZE(force_audio_names));
		if (prop == NULL)
			return;

		dev_priv->force_audio_property = prop;
	}
	drm_object_attach_property(&connector->base, prop, 0);
}

static const struct drm_prop_enum_list broadcast_rgb_names[] = {
	{ INTEL_BROADCAST_RGB_AUTO, "Automatic" },
	{ INTEL_BROADCAST_RGB_FULL, "Full" },
	{ INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
};

/*
 * Attach the (lazily created, device-wide) "Broadcast RGB" enum property
 * to @connector, defaulting to INTEL_BROADCAST_RGB_AUTO (0).
 */
void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_property *prop;

	prop = dev_priv->broadcast_rgb_property;
	if (prop == NULL) {
		/* drm_property_create_enum() already ORs in
		 * DRM_MODE_PROP_ENUM; pass 0 for flags, matching
		 * intel_attach_force_audio_property() above. */
		prop = drm_property_create_enum(dev, 0,
					   "Broadcast RGB",
					   broadcast_rgb_names,
					   ARRAY_SIZE(broadcast_rgb_names));
		if (prop == NULL)
			return;

		dev_priv->broadcast_rgb_property = prop;
	}

	drm_object_attach_property(&connector->base, prop, 0);
}
gpl-2.0
AICP/kernel_htc_m7
drivers/media/video/gspca/mr97310a.c
4956
34428
/* * Mars MR97310A library * * The original mr97310a driver, which supported the Aiptek Pencam VGA+, is * Copyright (C) 2009 Kyle Guinn <elyk03@gmail.com> * * Support for the MR97310A cameras in addition to the Aiptek Pencam VGA+ * and for the routines for detecting and classifying these various cameras, * is Copyright (C) 2009 Theodore Kilgore <kilgota@auburn.edu> * * Support for the control settings for the CIF cameras is * Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com> and * Thomas Kaiser <thomas@kaiser-linux.li> * * Support for the control settings for the VGA cameras is * Copyright (C) 2009 Theodore Kilgore <kilgota@auburn.edu> * * Several previously unsupported cameras are owned and have been tested by * Hans de Goede <hdegoede@redhat.com> and * Thomas Kaiser <thomas@kaiser-linux.li> and * Theodore Kilgore <kilgota@auburn.edu> and * Edmond Rodriguez <erodrig_97@yahoo.com> and * Aurelien Jacobs <aurel@gnuage.org> * * The MR97311A support in gspca/mars.c has been helpful in understanding some * of the registers in these cameras. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "mr97310a" #include "gspca.h" #define CAM_TYPE_CIF 0 #define CAM_TYPE_VGA 1 #define MR97310A_BRIGHTNESS_DEFAULT 0 #define MR97310A_EXPOSURE_MIN 0 #define MR97310A_EXPOSURE_MAX 4095 #define MR97310A_EXPOSURE_DEFAULT 1000 #define MR97310A_GAIN_MIN 0 #define MR97310A_GAIN_MAX 31 #define MR97310A_GAIN_DEFAULT 25 #define MR97310A_CONTRAST_MIN 0 #define MR97310A_CONTRAST_MAX 31 #define MR97310A_CONTRAST_DEFAULT 23 #define MR97310A_CS_GAIN_MIN 0 #define MR97310A_CS_GAIN_MAX 0x7ff #define MR97310A_CS_GAIN_DEFAULT 0x110 #define MR97310A_MIN_CLOCKDIV_MIN 3 #define MR97310A_MIN_CLOCKDIV_MAX 8 #define MR97310A_MIN_CLOCKDIV_DEFAULT 3 MODULE_AUTHOR("Kyle Guinn <elyk03@gmail.com>," "Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/Mars-Semi MR97310A USB Camera Driver"); MODULE_LICENSE("GPL"); /* global parameters */ static int force_sensor_type = -1; module_param(force_sensor_type, int, 0644); MODULE_PARM_DESC(force_sensor_type, "Force sensor type (-1 (auto), 0 or 1)"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ u8 sof_read; u8 cam_type; /* 0 is CIF and 1 is VGA */ u8 sensor_type; /* We use 0 and 1 here, too. 
*/ u8 do_lcd_stop; u8 adj_colors; int brightness; u16 exposure; u32 gain; u8 contrast; u8 min_clockdiv; }; struct sensor_w_data { u8 reg; u8 flags; u8 data[16]; int len; }; static void sd_stopN(struct gspca_dev *gspca_dev); static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val); static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val); static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val); static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setmin_clockdiv(struct gspca_dev *gspca_dev, __s32 val); static int sd_getmin_clockdiv(struct gspca_dev *gspca_dev, __s32 *val); static void setbrightness(struct gspca_dev *gspca_dev); static void setexposure(struct gspca_dev *gspca_dev); static void setgain(struct gspca_dev *gspca_dev); static void setcontrast(struct gspca_dev *gspca_dev); /* V4L2 controls supported by the driver */ static const struct ctrl sd_ctrls[] = { /* Separate brightness control description for Argus QuickClix as it has * different limits from the other mr97310a cameras, and separate gain * control for Sakar CyberPix camera. 
*/ { #define NORM_BRIGHTNESS_IDX 0 { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = -254, .maximum = 255, .step = 1, .default_value = MR97310A_BRIGHTNESS_DEFAULT, .flags = 0, }, .set = sd_setbrightness, .get = sd_getbrightness, }, { #define ARGUS_QC_BRIGHTNESS_IDX 1 { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 15, .step = 1, .default_value = MR97310A_BRIGHTNESS_DEFAULT, .flags = 0, }, .set = sd_setbrightness, .get = sd_getbrightness, }, { #define EXPOSURE_IDX 2 { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure", .minimum = MR97310A_EXPOSURE_MIN, .maximum = MR97310A_EXPOSURE_MAX, .step = 1, .default_value = MR97310A_EXPOSURE_DEFAULT, .flags = 0, }, .set = sd_setexposure, .get = sd_getexposure, }, { #define GAIN_IDX 3 { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = MR97310A_GAIN_MIN, .maximum = MR97310A_GAIN_MAX, .step = 1, .default_value = MR97310A_GAIN_DEFAULT, .flags = 0, }, .set = sd_setgain, .get = sd_getgain, }, { #define SAKAR_CS_GAIN_IDX 4 { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = MR97310A_CS_GAIN_MIN, .maximum = MR97310A_CS_GAIN_MAX, .step = 1, .default_value = MR97310A_CS_GAIN_DEFAULT, .flags = 0, }, .set = sd_setgain, .get = sd_getgain, }, { #define CONTRAST_IDX 5 { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = MR97310A_CONTRAST_MIN, .maximum = MR97310A_CONTRAST_MAX, .step = 1, .default_value = MR97310A_CONTRAST_DEFAULT, .flags = 0, }, .set = sd_setcontrast, .get = sd_getcontrast, }, { #define MIN_CLOCKDIV_IDX 6 { .id = V4L2_CID_PRIVATE_BASE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Minimum Clock Divider", .minimum = MR97310A_MIN_CLOCKDIV_MIN, .maximum = MR97310A_MIN_CLOCKDIV_MAX, .step = 1, .default_value = MR97310A_MIN_CLOCKDIV_DEFAULT, .flags = 0, }, .set = sd_setmin_clockdiv, .get = sd_getmin_clockdiv, }, 
}; static const struct v4l2_pix_format vga_mode[] = { {160, 120, V4L2_PIX_FMT_MR97310A, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 4}, {176, 144, V4L2_PIX_FMT_MR97310A, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3}, {320, 240, V4L2_PIX_FMT_MR97310A, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2}, {352, 288, V4L2_PIX_FMT_MR97310A, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {640, 480, V4L2_PIX_FMT_MR97310A, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; /* the bytes to write are in gspca_dev->usb_buf */ static int mr_write(struct gspca_dev *gspca_dev, int len) { int rc; rc = usb_bulk_msg(gspca_dev->dev, usb_sndbulkpipe(gspca_dev->dev, 4), gspca_dev->usb_buf, len, NULL, 500); if (rc < 0) pr_err("reg write [%02x] error %d\n", gspca_dev->usb_buf[0], rc); return rc; } /* the bytes are read into gspca_dev->usb_buf */ static int mr_read(struct gspca_dev *gspca_dev, int len) { int rc; rc = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 3), gspca_dev->usb_buf, len, NULL, 500); if (rc < 0) pr_err("reg read [%02x] error %d\n", gspca_dev->usb_buf[0], rc); return rc; } static int sensor_write_reg(struct gspca_dev *gspca_dev, u8 reg, u8 flags, const u8 *data, int len) { gspca_dev->usb_buf[0] = 0x1f; gspca_dev->usb_buf[1] = flags; gspca_dev->usb_buf[2] = reg; memcpy(gspca_dev->usb_buf + 3, data, len); return mr_write(gspca_dev, len + 3); } static int sensor_write_regs(struct gspca_dev *gspca_dev, const struct sensor_w_data *data, int len) { int i, rc; for (i = 0; i < len; i++) { rc = sensor_write_reg(gspca_dev, data[i].reg, data[i].flags, data[i].data, data[i].len); if (rc < 0) return rc; } return 0; } static int sensor_write1(struct gspca_dev 
*gspca_dev, u8 reg, u8 data) { struct sd *sd = (struct sd *) gspca_dev; u8 buf, confirm_reg; int rc; buf = data; if (sd->cam_type == CAM_TYPE_CIF) { rc = sensor_write_reg(gspca_dev, reg, 0x01, &buf, 1); confirm_reg = sd->sensor_type ? 0x13 : 0x11; } else { rc = sensor_write_reg(gspca_dev, reg, 0x00, &buf, 1); confirm_reg = 0x11; } if (rc < 0) return rc; buf = 0x01; rc = sensor_write_reg(gspca_dev, confirm_reg, 0x00, &buf, 1); if (rc < 0) return rc; return 0; } static int cam_get_response16(struct gspca_dev *gspca_dev, u8 reg, int verbose) { int err_code; gspca_dev->usb_buf[0] = reg; err_code = mr_write(gspca_dev, 1); if (err_code < 0) return err_code; err_code = mr_read(gspca_dev, 16); if (err_code < 0) return err_code; if (verbose) PDEBUG(D_PROBE, "Register: %02x reads %02x%02x%02x", reg, gspca_dev->usb_buf[0], gspca_dev->usb_buf[1], gspca_dev->usb_buf[2]); return 0; } static int zero_the_pointer(struct gspca_dev *gspca_dev) { __u8 *data = gspca_dev->usb_buf; int err_code; u8 status = 0; int tries = 0; err_code = cam_get_response16(gspca_dev, 0x21, 0); if (err_code < 0) return err_code; data[0] = 0x19; data[1] = 0x51; err_code = mr_write(gspca_dev, 2); if (err_code < 0) return err_code; err_code = cam_get_response16(gspca_dev, 0x21, 0); if (err_code < 0) return err_code; data[0] = 0x19; data[1] = 0xba; err_code = mr_write(gspca_dev, 2); if (err_code < 0) return err_code; err_code = cam_get_response16(gspca_dev, 0x21, 0); if (err_code < 0) return err_code; data[0] = 0x19; data[1] = 0x00; err_code = mr_write(gspca_dev, 2); if (err_code < 0) return err_code; err_code = cam_get_response16(gspca_dev, 0x21, 0); if (err_code < 0) return err_code; data[0] = 0x19; data[1] = 0x00; err_code = mr_write(gspca_dev, 2); if (err_code < 0) return err_code; while (status != 0x0a && tries < 256) { err_code = cam_get_response16(gspca_dev, 0x21, 0); status = data[0]; tries++; if (err_code < 0) return err_code; } if (status != 0x0a) PDEBUG(D_ERR, "status is %02x", status); tries = 0; 
while (tries < 4) { data[0] = 0x19; data[1] = 0x00; err_code = mr_write(gspca_dev, 2); if (err_code < 0) return err_code; err_code = cam_get_response16(gspca_dev, 0x21, 0); status = data[0]; tries++; if (err_code < 0) return err_code; } data[0] = 0x19; err_code = mr_write(gspca_dev, 1); if (err_code < 0) return err_code; err_code = mr_read(gspca_dev, 16); if (err_code < 0) return err_code; return 0; } static int stream_start(struct gspca_dev *gspca_dev) { gspca_dev->usb_buf[0] = 0x01; gspca_dev->usb_buf[1] = 0x01; return mr_write(gspca_dev, 2); } static void stream_stop(struct gspca_dev *gspca_dev) { gspca_dev->usb_buf[0] = 0x01; gspca_dev->usb_buf[1] = 0x00; if (mr_write(gspca_dev, 2) < 0) PDEBUG(D_ERR, "Stream Stop failed"); } static void lcd_stop(struct gspca_dev *gspca_dev) { gspca_dev->usb_buf[0] = 0x19; gspca_dev->usb_buf[1] = 0x54; if (mr_write(gspca_dev, 2) < 0) PDEBUG(D_ERR, "LCD Stop failed"); } static int isoc_enable(struct gspca_dev *gspca_dev) { gspca_dev->usb_buf[0] = 0x00; gspca_dev->usb_buf[1] = 0x4d; /* ISOC transferring enable... */ return mr_write(gspca_dev, 2); } /* This function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; int gain_default = MR97310A_GAIN_DEFAULT; int err_code; cam = &gspca_dev->cam; cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); sd->do_lcd_stop = 0; /* Several of the supported CIF cameras share the same USB ID but * require different initializations and different control settings. * The same is true of the VGA cameras. Therefore, we are forced * to start the initialization process in order to determine which * camera is present. Some of the supported cameras require the * memory pointer to be set to 0 as the very first item of business * or else they will not stream. So we do that immediately. 
*/ err_code = zero_the_pointer(gspca_dev); if (err_code < 0) return err_code; err_code = stream_start(gspca_dev); if (err_code < 0) return err_code; /* Now, the query for sensor type. */ err_code = cam_get_response16(gspca_dev, 0x07, 1); if (err_code < 0) return err_code; if (id->idProduct == 0x0110 || id->idProduct == 0x010e) { sd->cam_type = CAM_TYPE_CIF; cam->nmodes--; /* * All but one of the known CIF cameras share the same USB ID, * but two different init routines are in use, and the control * settings are different, too. We need to detect which camera * of the two known varieties is connected! * * A list of known CIF cameras follows. They all report either * 0200 for type 0 or 0300 for type 1. * If you have another to report, please do * * Name sd->sensor_type reported by * * Sakar 56379 Spy-shot 0 T. Kilgore * Innovage 0 T. Kilgore * Vivitar Mini 0 H. De Goede * Vivitar Mini 0 E. Rodriguez * Vivitar Mini 1 T. Kilgore * Elta-Media 8212dc 1 T. Kaiser * Philips dig. keych. 1 T. Kilgore * Trust Spyc@m 100 1 A. Jacobs */ switch (gspca_dev->usb_buf[0]) { case 2: sd->sensor_type = 0; break; case 3: sd->sensor_type = 1; break; default: pr_err("Unknown CIF Sensor id : %02x\n", gspca_dev->usb_buf[1]); return -ENODEV; } PDEBUG(D_PROBE, "MR97310A CIF camera detected, sensor: %d", sd->sensor_type); } else { sd->cam_type = CAM_TYPE_VGA; /* * Here is a table of the responses to the query for sensor * type, from the known MR97310A VGA cameras. Six different * cameras of which five share the same USB ID. * * Name gspca_dev->usb_buf[] sd->sensor_type * sd->do_lcd_stop * Aiptek Pencam VGA+ 0300 0 1 * ION digital 0300 0 1 * Argus DC-1620 0450 1 0 * Argus QuickClix 0420 1 1 * Sakar 77379 Digital 0350 0 1 * Sakar 1638x CyberPix 0120 0 2 * * Based upon these results, we assume default settings * and then correct as necessary, as follows. 
* */ sd->sensor_type = 1; sd->do_lcd_stop = 0; sd->adj_colors = 0; if (gspca_dev->usb_buf[0] == 0x01) { sd->sensor_type = 2; } else if ((gspca_dev->usb_buf[0] != 0x03) && (gspca_dev->usb_buf[0] != 0x04)) { pr_err("Unknown VGA Sensor id Byte 0: %02x\n", gspca_dev->usb_buf[0]); pr_err("Defaults assumed, may not work\n"); pr_err("Please report this\n"); } /* Sakar Digital color needs to be adjusted. */ if ((gspca_dev->usb_buf[0] == 0x03) && (gspca_dev->usb_buf[1] == 0x50)) sd->adj_colors = 1; if (gspca_dev->usb_buf[0] == 0x04) { sd->do_lcd_stop = 1; switch (gspca_dev->usb_buf[1]) { case 0x50: sd->sensor_type = 0; PDEBUG(D_PROBE, "sensor_type corrected to 0"); break; case 0x20: /* Nothing to do here. */ break; default: pr_err("Unknown VGA Sensor id Byte 1: %02x\n", gspca_dev->usb_buf[1]); pr_err("Defaults assumed, may not work\n"); pr_err("Please report this\n"); } } PDEBUG(D_PROBE, "MR97310A VGA camera detected, sensor: %d", sd->sensor_type); } /* Stop streaming as we've started it only to probe the sensor type. 
*/ sd_stopN(gspca_dev); if (force_sensor_type != -1) { sd->sensor_type = !!force_sensor_type; PDEBUG(D_PROBE, "Forcing sensor type to: %d", sd->sensor_type); } /* Setup controls depending on camera type */ if (sd->cam_type == CAM_TYPE_CIF) { /* No brightness for sensor_type 0 */ if (sd->sensor_type == 0) gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) | (1 << ARGUS_QC_BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << SAKAR_CS_GAIN_IDX); else gspca_dev->ctrl_dis = (1 << ARGUS_QC_BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << SAKAR_CS_GAIN_IDX) | (1 << MIN_CLOCKDIV_IDX); } else { /* All controls need to be disabled if VGA sensor_type is 0 */ if (sd->sensor_type == 0) gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) | (1 << ARGUS_QC_BRIGHTNESS_IDX) | (1 << EXPOSURE_IDX) | (1 << GAIN_IDX) | (1 << CONTRAST_IDX) | (1 << SAKAR_CS_GAIN_IDX) | (1 << MIN_CLOCKDIV_IDX); else if (sd->sensor_type == 2) { gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) | (1 << ARGUS_QC_BRIGHTNESS_IDX) | (1 << GAIN_IDX) | (1 << MIN_CLOCKDIV_IDX); gain_default = MR97310A_CS_GAIN_DEFAULT; } else if (sd->do_lcd_stop) /* Argus QuickClix has different brightness limits */ gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << SAKAR_CS_GAIN_IDX); else gspca_dev->ctrl_dis = (1 << ARGUS_QC_BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << SAKAR_CS_GAIN_IDX); } sd->brightness = MR97310A_BRIGHTNESS_DEFAULT; sd->exposure = MR97310A_EXPOSURE_DEFAULT; sd->gain = gain_default; sd->contrast = MR97310A_CONTRAST_DEFAULT; sd->min_clockdiv = MR97310A_MIN_CLOCKDIV_DEFAULT; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } static int start_cif_cam(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; __u8 *data = gspca_dev->usb_buf; int err_code; static const __u8 startup_string[] = { 0x00, 0x0d, 0x01, 0x00, /* Hsize/8 for 352 or 320 */ 0x00, /* Vsize/4 for 288 or 240 */ 0x13, /* or 0xbb, depends on 
sensor */
		0x00,	/* Hstart, depends on res. */
		0x00,	/* reserved ? */
		0x00,	/* Vstart, depends on res. and sensor */
		0x50,	/* 0x54 to get 176 or 160 */
		0xc0
	};

	/* Note: Some of the above descriptions guessed from MR97113A driver */
	memcpy(data, startup_string, 11);
	if (sd->sensor_type)
		data[5] = 0xbb;

	/* Fill in the resolution-dependent registers; the 160/176 cases
	 * reuse the 320/352 setup plus a 2:1 scale-down bit in reg 8. */
	switch (gspca_dev->width) {
	case 160:
		data[9] |= 0x04;  /* reg 8, 2:1 scale down from 320 */
		/* fall thru */
	case 320:
	default:
		data[3] = 0x28;			   /* reg 2, H size/8 */
		data[4] = 0x3c;			   /* reg 3, V size/4 */
		data[6] = 0x14;			   /* reg 5, H start */
		data[8] = 0x1a + sd->sensor_type;  /* reg 7, V start */
		break;
	case 176:
		data[9] |= 0x04;  /* reg 8, 2:1 scale down from 352 */
		/* fall thru */
	case 352:
		data[3] = 0x2c;			   /* reg 2, H size/8 */
		data[4] = 0x48;			   /* reg 3, V size/4 */
		data[6] = 0x06;			   /* reg 5, H start */
		data[8] = 0x06 - sd->sensor_type;  /* reg 7, V start */
		break;
	}

	/* Send the 11-byte bridge setup block. */
	err_code = mr_write(gspca_dev, 11);
	if (err_code < 0)
		return err_code;

	if (!sd->sensor_type) {
		/* Sensor register table entries: {reg, flags, values..., len} */
		static const struct sensor_w_data cif_sensor0_init_data[] = {
			{0x02, 0x00, {0x03, 0x5a, 0xb5, 0x01,
				      0x0f, 0x14, 0x0f, 0x10}, 8},
			{0x0c, 0x00, {0x04, 0x01, 0x01, 0x00, 0x1f}, 5},
			{0x12, 0x00, {0x07}, 1},
			{0x1f, 0x00, {0x06}, 1},
			{0x27, 0x00, {0x04}, 1},
			{0x29, 0x00, {0x0c}, 1},
			{0x40, 0x00, {0x40, 0x00, 0x04}, 3},
			{0x50, 0x00, {0x60}, 1},
			{0x60, 0x00, {0x06}, 1},
			{0x6b, 0x00, {0x85, 0x85, 0xc8, 0xc8, 0xc8, 0xc8}, 6},
			{0x72, 0x00, {0x1e, 0x56}, 2},
			{0x75, 0x00, {0x58, 0x40, 0xa2, 0x02, 0x31, 0x02,
				      0x31, 0x80, 0x00}, 9},
			{0x11, 0x00, {0x01}, 1},
			{0, 0, {0}, 0}
		};
		err_code = sensor_write_regs(gspca_dev, cif_sensor0_init_data,
					 ARRAY_SIZE(cif_sensor0_init_data));
	} else {	/* sd->sensor_type = 1 */
		static const struct sensor_w_data cif_sensor1_init_data[] = {
			/* Reg 3,4, 7,8 get set by the controls */
			{0x02, 0x00, {0x10}, 1},
			{0x05, 0x01, {0x22}, 1}, /* 5/6 also seen as 65h/32h */
			{0x06, 0x01, {0x00}, 1},
			{0x09, 0x02, {0x0e}, 1},
			{0x0a, 0x02, {0x05}, 1},
			{0x0b, 0x02, {0x05}, 1},
			{0x0c, 0x02, {0x0f}, 1},
			{0x0d, 0x02, {0x07},
1}, {0x0e, 0x02, {0x0c}, 1},
			{0x0f, 0x00, {0x00}, 1},
			{0x10, 0x00, {0x06}, 1},
			{0x11, 0x00, {0x07}, 1},
			{0x12, 0x00, {0x00}, 1},
			{0x13, 0x00, {0x01}, 1},
			{0, 0, {0}, 0}
		};
		/* Without this command the cam won't work with USB-UHCI */
		gspca_dev->usb_buf[0] = 0x0a;
		gspca_dev->usb_buf[1] = 0x00;
		err_code = mr_write(gspca_dev, 2);
		if (err_code < 0)
			return err_code;
		err_code = sensor_write_regs(gspca_dev, cif_sensor1_init_data,
					 ARRAY_SIZE(cif_sensor1_init_data));
	}
	return err_code;
}

/* Program the bridge and sensor of a VGA-type camera for streaming.
 * Returns 0 on success or a negative error code from the write helpers. */
static int start_vga_cam(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	__u8 *data = gspca_dev->usb_buf;
	int err_code;
	static const __u8 startup_string[] =
		{0x00, 0x0d, 0x01, 0x00, 0x00, 0x2b, 0x00, 0x00,
		 0x00, 0x50, 0xc0};

	/* What some of these mean is explained in start_cif_cam(), above */
	memcpy(data, startup_string, 11);
	if (!sd->sensor_type) {
		data[5] = 0x00;
		data[10] = 0x91;
	}
	if (sd->sensor_type == 2) {
		data[5] = 0x00;
		data[10] = 0x18;
	}

	/* Resolution-dependent registers; smaller modes reuse the larger
	 * mode's setup plus scale-down bits in reg 8. */
	switch (gspca_dev->width) {
	case 160:
		data[9] |= 0x0c;  /* reg 8, 4:1 scale down */
		/* fall thru */
	case 320:
		data[9] |= 0x04;  /* reg 8, 2:1 scale down */
		/* fall thru */
	case 640:
	default:
		data[3] = 0x50;  /* reg 2, H size/8 */
		data[4] = 0x78;  /* reg 3, V size/4 */
		data[6] = 0x04;  /* reg 5, H start */
		data[8] = 0x03;  /* reg 7, V start */
		if (sd->sensor_type == 2) {
			data[6] = 2;
			data[8] = 1;
		}
		if (sd->do_lcd_stop)
			data[8] = 0x04;  /* Bayer tile shifted */
		break;
	case 176:
		data[9] |= 0x04;  /* reg 8, 2:1 scale down */
		/* fall thru */
	case 352:
		data[3] = 0x2c;  /* reg 2, H size */
		data[4] = 0x48;  /* reg 3, V size */
		data[6] = 0x94;  /* reg 5, H start */
		data[8] = 0x63;  /* reg 7, V start */
		if (sd->do_lcd_stop)
			data[8] = 0x64;  /* Bayer tile shifted */
		break;
	}

	/* Send the 11-byte bridge setup block. */
	err_code = mr_write(gspca_dev, 11);
	if (err_code < 0)
		return err_code;

	if (!sd->sensor_type) {
		static const struct sensor_w_data vga_sensor0_init_data[] = {
			{0x01, 0x00, {0x0c, 0x00, 0x04}, 3},
			{0x14, 0x00, {0x01, 0xe4, 0x02, 0x84}, 4},
			{0x20, 0x00, {0x00, 0x80, 0x00, 0x08}, 4},
{0x25, 0x00, {0x03, 0xa9, 0x80}, 3},
			{0x30, 0x00, {0x30, 0x18, 0x10, 0x18}, 4},
			{0, 0, {0}, 0}
		};
		err_code = sensor_write_regs(gspca_dev, vga_sensor0_init_data,
					 ARRAY_SIZE(vga_sensor0_init_data));
	} else if (sd->sensor_type == 1) {
		static const struct sensor_w_data color_adj[] = {
			{0x02, 0x00, {0x06, 0x59, 0x0c, 0x16, 0x00,
				/* adjusted blue, green, red gain correct
				   too much blue from the Sakar Digital */
				0x05, 0x01, 0x04}, 8}
		};
		static const struct sensor_w_data color_no_adj[] = {
			{0x02, 0x00, {0x06, 0x59, 0x0c, 0x16, 0x00,
				/* default blue, green, red gain settings */
				0x07, 0x00, 0x01}, 8}
		};
		static const struct sensor_w_data vga_sensor1_init_data[] = {
			{0x11, 0x04, {0x01}, 1},
			{0x0a, 0x00, {0x00, 0x01, 0x00, 0x00, 0x01,
			/* These settings may be better for some cameras */
			/* {0x0a, 0x00, {0x01, 0x06, 0x00, 0x00, 0x01, */
				0x00, 0x0a}, 7},
			{0x11, 0x04, {0x01}, 1},
			{0x12, 0x00, {0x00, 0x63, 0x00, 0x70, 0x00, 0x00}, 6},
			{0x11, 0x04, {0x01}, 1},
			{0, 0, {0}, 0}
		};
		/* Choose the color-gain table according to the adj_colors
		 * quirk detected at probe time (Sakar Digital). */
		if (sd->adj_colors)
			err_code = sensor_write_regs(gspca_dev, color_adj,
						 ARRAY_SIZE(color_adj));
		else
			err_code = sensor_write_regs(gspca_dev, color_no_adj,
						 ARRAY_SIZE(color_no_adj));
		if (err_code < 0)
			return err_code;
		err_code = sensor_write_regs(gspca_dev, vga_sensor1_init_data,
					 ARRAY_SIZE(vga_sensor1_init_data));
	} else {	/* sensor type == 2 */
		static const struct sensor_w_data vga_sensor2_init_data[] = {
			{0x01, 0x00, {0x48}, 1},
			{0x02, 0x00, {0x22}, 1},
			/* Reg 3 msb and 4 is lsb of the exposure setting*/
			{0x05, 0x00, {0x10}, 1},
			{0x06, 0x00, {0x00}, 1},
			{0x07, 0x00, {0x00}, 1},
			{0x08, 0x00, {0x00}, 1},
			{0x09, 0x00, {0x00}, 1},
			/* The following are used in the gain control
			 * which is BTW completely borked in the OEM driver
			 * The values for each color go from 0 to 0x7ff
			 *{0x0a, 0x00, {0x01}, 1},  green1 gain msb
			 *{0x0b, 0x00, {0x10}, 1},  green1 gain lsb
			 *{0x0c, 0x00, {0x01}, 1},  red gain msb
			 *{0x0d, 0x00, {0x10}, 1},  red gain lsb
			 *{0x0e, 0x00, {0x01}, 1},  blue gain msb
			 *{0x0f, 0x00, {0x10}, 1},  blue gain lsb
*{0x10, 0x00, {0x01}, 1},  green2 gain msb
			 *{0x11, 0x00, {0x10}, 1},  green2 gain lsb */
			{0x12, 0x00, {0x00}, 1},
			{0x13, 0x00, {0x04}, 1}, /* weird effect on colors */
			{0x14, 0x00, {0x00}, 1},
			{0x15, 0x00, {0x06}, 1},
			{0x16, 0x00, {0x01}, 1},
			{0x17, 0x00, {0xe2}, 1}, /* vertical alignment */
			{0x18, 0x00, {0x02}, 1},
			{0x19, 0x00, {0x82}, 1}, /* don't mess with */
			{0x1a, 0x00, {0x00}, 1},
			{0x1b, 0x00, {0x20}, 1},
			/* {0x1c, 0x00, {0x17}, 1},  contrast control */
			{0x1d, 0x00, {0x80}, 1}, /* moving causes a mess */
			{0x1e, 0x00, {0x08}, 1}, /* moving jams the camera */
			{0x1f, 0x00, {0x0c}, 1},
			{0x20, 0x00, {0x00}, 1},
			{0, 0, {0}, 0}
		};
		err_code = sensor_write_regs(gspca_dev, vga_sensor2_init_data,
					 ARRAY_SIZE(vga_sensor2_init_data));
	}
	return err_code;
}

/* gspca .start callback: re-initialize the camera from a clean state
 * (see comment below), program it for the chosen resolution, push the
 * cached control values and enable the isochronous pipe. */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int err_code;

	sd->sof_read = 0;

	/* Some of the VGA cameras require the memory pointer
	 * to be set to 0 again. We have been forced to start the
	 * stream in sd_config() to detect the hardware, and closed it.
	 * Thus, we need here to do a completely fresh and clean start. */
	err_code = zero_the_pointer(gspca_dev);
	if (err_code < 0)
		return err_code;

	err_code = stream_start(gspca_dev);
	if (err_code < 0)
		return err_code;

	/* Camera-type specific bridge/sensor setup. */
	if (sd->cam_type == CAM_TYPE_CIF) {
		err_code = start_cif_cam(gspca_dev);
	} else {
		err_code = start_vga_cam(gspca_dev);
	}
	if (err_code < 0)
		return err_code;

	/* Apply the currently cached control values to the hardware. */
	setbrightness(gspca_dev);
	setcontrast(gspca_dev);
	setexposure(gspca_dev);
	setgain(gspca_dev);

	return isoc_enable(gspca_dev);
}

/* gspca .stopN callback: stop the stream and quiesce the camera. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	stream_stop(gspca_dev);
	/* Not all the cams need this, but even if not, probably a good idea */
	zero_the_pointer(gspca_dev);
	if (sd->do_lcd_stop)
		lcd_stop(gspca_dev);
}

/* Write the cached brightness value to the sensor. */
static void setbrightness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 val;
	u8 sign_reg = 7;	/* This reg and the next one used on CIF cams.
*/
	u8 value_reg = 8;	/* VGA cams seem to use regs 0x0b and 0x0c */
	static const u8 quick_clix_table[] =
	/*	  0  1  2   3  4  5  6  7  8  9  10  11  12  13  14  15 */
		{ 0, 4, 8, 12, 1, 2, 3, 5, 6, 9,  7, 10, 13, 11, 14, 15};
	/*
	 * This control is disabled for CIF type 1 and VGA type 0 cameras.
	 * It does not quite act linearly for the Argus QuickClix camera,
	 * but it does control brightness. The values are 0 - 15 only, and
	 * the table above makes them act consecutively.
	 */
	if ((gspca_dev->ctrl_dis & (1 << NORM_BRIGHTNESS_IDX)) &&
	    (gspca_dev->ctrl_dis & (1 << ARGUS_QC_BRIGHTNESS_IDX)))
		return;

	/* VGA cameras use the register pair four above the CIF one. */
	if (sd->cam_type == CAM_TYPE_VGA) {
		sign_reg += 4;
		value_reg += 4;
	}

	/* Note register 7 is also seen as 0x8x or 0xCx in some dumps */
	if (sd->brightness > 0) {
		sensor_write1(gspca_dev, sign_reg, 0x00);
		val = sd->brightness;
	} else {
		sensor_write1(gspca_dev, sign_reg, 0x01);
		val = (257 - sd->brightness);
	}
	/* Use lookup table for funky Argus QuickClix brightness */
	if (sd->do_lcd_stop)
		val = quick_clix_table[val];

	sensor_write1(gspca_dev, value_reg, val);
}

/* Write the cached exposure value to the sensor; the register layout
 * and scaling differ per camera/sensor combination. */
static void setexposure(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int exposure = MR97310A_EXPOSURE_DEFAULT;
	u8 buf[2];

	if (gspca_dev->ctrl_dis & (1 << EXPOSURE_IDX))
		return;

	if (sd->cam_type == CAM_TYPE_CIF && sd->sensor_type == 1) {
		/* This cam does not like exposure settings < 300,
		   so scale 0 - 4095 to 300 - 4095 */
		exposure = (sd->exposure * 9267) / 10000 + 300;
		sensor_write1(gspca_dev, 3, exposure >> 4);
		sensor_write1(gspca_dev, 4, exposure & 0x0f);
	} else if (sd->sensor_type == 2) {
		exposure = sd->exposure;
		exposure >>= 3;
		sensor_write1(gspca_dev, 3, exposure >> 8);
		sensor_write1(gspca_dev, 4, exposure & 0xff);
	} else {
		/* We have both a clock divider and an exposure register.
		   We first calculate the clock divider, as that determines
		   the maximum exposure and then we calculate the exposure
		   register setting (which goes from 0 - 511).
Note our 0 - 4095 exposure is mapped to 0 - 511
		   milliseconds exposure time */
		u8 clockdiv = (60 * sd->exposure + 7999) / 8000;

		/* Limit framerate to not exceed usb bandwidth */
		if (clockdiv < sd->min_clockdiv && gspca_dev->width >= 320)
			clockdiv = sd->min_clockdiv;
		else if (clockdiv < 2)
			clockdiv = 2;

		if (sd->cam_type == CAM_TYPE_VGA && clockdiv < 4)
			clockdiv = 4;

		/* Frame exposure time in ms = 1000 * clockdiv / 60 ->
		   exposure = (sd->exposure / 8) * 511 /
			      (1000 * clockdiv / 60) */
		exposure = (60 * 511 * sd->exposure) / (8000 * clockdiv);
		if (exposure > 511)
			exposure = 511;

		/* exposure register value is reversed! */
		exposure = 511 - exposure;

		buf[0] = exposure & 0xff;
		buf[1] = exposure >> 8;
		sensor_write_reg(gspca_dev, 0x0e, 0, buf, 2);
		sensor_write1(gspca_dev, 0x02, clockdiv);
	}
}

/* Write the cached gain value to the sensor; the register layout differs
 * per camera/sensor combination. */
static void setgain(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 gainreg;

	if ((gspca_dev->ctrl_dis & (1 << GAIN_IDX)) &&
	    (gspca_dev->ctrl_dis & (1 << SAKAR_CS_GAIN_IDX)))
		return;

	if (sd->cam_type == CAM_TYPE_CIF && sd->sensor_type == 1)
		sensor_write1(gspca_dev, 0x0e, sd->gain);
	else if (sd->cam_type == CAM_TYPE_VGA && sd->sensor_type == 2)
		/* Per-color msb/lsb gain register pairs at 0x0a..0x11
		 * (see the commented-out table in start_vga_cam()). */
		for (gainreg = 0x0a; gainreg < 0x11; gainreg += 2) {
			sensor_write1(gspca_dev, gainreg, sd->gain >> 8);
			sensor_write1(gspca_dev, gainreg + 1,
				      sd->gain & 0xff);
		}
	else
		sensor_write1(gspca_dev, 0x10, sd->gain);
}

/* Write the cached contrast value to the sensor. */
static void setcontrast(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (gspca_dev->ctrl_dis & (1 << CONTRAST_IDX))
		return;
	sensor_write1(gspca_dev, 0x1c, sd->contrast);
}

/* V4L2 control callback: cache brightness; apply it if streaming. */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->brightness = val;
	if (gspca_dev->streaming)
		setbrightness(gspca_dev);
	return 0;
}

/* V4L2 control callback: report the cached brightness. */
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->brightness;
	return 0;
}

/* V4L2 control callback: cache exposure; apply it if streaming. */
static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd
= (struct sd *) gspca_dev;

	sd->exposure = val;
	if (gspca_dev->streaming)
		setexposure(gspca_dev);
	return 0;
}

/* V4L2 control callback: report the cached exposure. */
static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->exposure;
	return 0;
}

/* V4L2 control callback: cache gain; apply it if streaming. */
static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->gain = val;
	if (gspca_dev->streaming)
		setgain(gspca_dev);
	return 0;
}

/* V4L2 control callback: report the cached gain. */
static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->gain;
	return 0;
}

/* V4L2 control callback: cache contrast; apply it if streaming. */
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->contrast = val;
	if (gspca_dev->streaming)
		setcontrast(gspca_dev);
	return 0;
}

/* V4L2 control callback: report the cached contrast. */
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->contrast;
	return 0;
}

/* V4L2 control callback: cache the minimum clock divider; it feeds the
 * exposure calculation, so re-apply exposure if streaming. */
static int sd_setmin_clockdiv(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->min_clockdiv = val;
	if (gspca_dev->streaming)
		setexposure(gspca_dev);
	return 0;
}

/* V4L2 control callback: report the cached minimum clock divider. */
static int sd_getmin_clockdiv(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->min_clockdiv;
	return 0;
}

/* Include pac common sof detection functions */
#include "pac_common.h"

/* gspca .pkt_scan callback: look for a start-of-frame marker in the
 * isochronous packet and feed the pieces to the frame assembler. */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,	/* isoc packet */
			int len)	/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;
	unsigned char *sof;

	sof = pac_find_sof(&sd->sof_read, data, len);
	if (sof) {
		int n;

		/* finish decoding current frame */
		n = sof - data;
		if (n > sizeof pac_sof_marker)
			n -= sizeof pac_sof_marker;
		else
			n = 0;
		gspca_frame_add(gspca_dev, LAST_PACKET,
				data, n);
		/* Start next frame.
*/
		gspca_frame_add(gspca_dev, FIRST_PACKET,
				pac_sof_marker, sizeof pac_sof_marker);
		len -= sof - data;
		data = sof;
	}
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}

/* sub-driver description */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.ctrls = sd_ctrls,
	.nctrls = ARRAY_SIZE(sd_ctrls),
	.config = sd_config,
	.init = sd_init,
	.start = sd_start,
	.stopN = sd_stopN,
	.pkt_scan = sd_pkt_scan,
};

/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x08ca, 0x0110)},	/* Trust Spyc@m 100 */
	{USB_DEVICE(0x08ca, 0x0111)},	/* Aiptek Pencam VGA+ */
	{USB_DEVICE(0x093a, 0x010f)},	/* All other known MR97310A VGA cams */
	{USB_DEVICE(0x093a, 0x010e)},	/* All known MR97310A CIF cams */
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);

/* -- device connect -- */
/* USB probe callback: hand the device to the gspca core. */
static int sd_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
			       THIS_MODULE);
}

static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
gpl-2.0
tako0910/m7-kernel
drivers/media/video/gspca/spca501.c
4956
55922
/* * SPCA501 chip based cameras initialization data * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "spca501" #include "gspca.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("GSPCA/SPCA501 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */

	/* Cached control values (see the sd_set*/sd_get* prototypes below). */
	unsigned short contrast;
	__u8 brightness;
	__u8 colors;
	__u8 blue_balance;
	__u8 red_balance;

	/* Camera subtype; one of the constants below. */
	char subtype;
#define Arowana300KCMOSCamera 0
#define IntelCreateAndShare 1
#define KodakDVC325 2
#define MystFromOriUnknownCamera 3
#define SmileIntlCamera 4
#define ThreeComHomeConnectLite 5
#define ViewQuestM318B 6
};

/* V4L2 controls supported by the driver */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val);
static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val);

/* Control table handed to the gspca core. */
static const struct ctrl sd_ctrls[] = {
#define MY_BRIGHTNESS 0
	{
	    {
		.id = V4L2_CID_BRIGHTNESS,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Brightness",
		.minimum = 0,
		.maximum = 127,
		.step = 1,
		.default_value = 0,
	    },
	    .set = sd_setbrightness,
	    .get = sd_getbrightness,
	},
#define MY_CONTRAST 1
	{
	    {
		.id = V4L2_CID_CONTRAST,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Contrast",
		.minimum = 0,
		.maximum = 64725,
		.step = 1,
		.default_value = 64725,
	    },
	    .set = sd_setcontrast,
	    .get = sd_getcontrast,
	},
#define MY_COLOR 2
	{
	    {
		.id = V4L2_CID_SATURATION,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Color",
		.minimum = 0,
		.maximum = 63,
		.step = 1,
		.default_value = 20,
	    },
	    .set = sd_setcolors,
	    .get = sd_getcolors,
	},
#define MY_BLUE_BALANCE 3
	{
	    {
		.id = V4L2_CID_BLUE_BALANCE,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Blue Balance",
		.minimum = 0,
		.maximum = 127,
		.step = 1,
		.default_value = 0,
	    },
	    .set = sd_setblue_balance,
	    .get = sd_getblue_balance,
	},
#define MY_RED_BALANCE 4
	{
	    {
		.id = V4L2_CID_RED_BALANCE,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Red Balance",
		.minimum = 0,
		.maximum = 127,
		.step = 1,
		.default_value = 0,
	    },
	    .set = sd_setred_balance,
	    .get = sd_getred_balance,
	},
};

/* Frame formats advertised to V4L2. */
static const struct v4l2_pix_format vga_mode[] = {
	{160, 120, V4L2_PIX_FMT_SPCA501, V4L2_FIELD_NONE,
		.bytesperline = 160,
		.sizeimage = 160 * 120 * 3 / 2,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 2},
	{320, 240, V4L2_PIX_FMT_SPCA501, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240 * 3 / 2,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 1},
	{640, 480, V4L2_PIX_FMT_SPCA501, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 480 * 3 / 2,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0},
};

#define SPCA50X_REG_USB 0x2	/* spca505 501 */

/*
 * Data to initialize a SPCA501. From a capture file provided by Bill Roehl
 * With SPCA501 chip description
 */
#define CCDSP_SET	/* set CCDSP parameters */
#define TG_SET		/* set time generator set */
#undef DSPWIN_SET	/* set DSP windows parameters */
#undef ALTER_GAMA	/* Set alternate set to YUV transform coeffs.
*/ #define SPCA501_SNAPBIT 0x80 #define SPCA501_SNAPCTRL 0x10 /* Frame packet header offsets for the spca501 */ #define SPCA501_OFFSET_GPIO 1 #define SPCA501_OFFSET_TYPE 2 #define SPCA501_OFFSET_TURN3A 3 #define SPCA501_OFFSET_FRAMSEQ 4 #define SPCA501_OFFSET_COMPRESS 5 #define SPCA501_OFFSET_QUANT 6 #define SPCA501_OFFSET_QUANT2 7 #define SPCA501_OFFSET_DATA 8 #define SPCA501_PROP_COMP_ENABLE(d) ((d) & 1) #define SPCA501_PROP_SNAP(d) ((d) & 0x40) #define SPCA501_PROP_SNAP_CTRL(d) ((d) & 0x10) #define SPCA501_PROP_COMP_THRESH(d) (((d) & 0x0e) >> 1) #define SPCA501_PROP_COMP_QUANT(d) (((d) & 0x70) >> 4) /* SPCA501 CCDSP control */ #define SPCA501_REG_CCDSP 0x01 /* SPCA501 control/status registers */ #define SPCA501_REG_CTLRL 0x02 /* registers for color correction and YUV transformation */ #define SPCA501_A11 0x08 #define SPCA501_A12 0x09 #define SPCA501_A13 0x0A #define SPCA501_A21 0x0B #define SPCA501_A22 0x0C #define SPCA501_A23 0x0D #define SPCA501_A31 0x0E #define SPCA501_A32 0x0F #define SPCA501_A33 0x10 /* Data for video camera initialization before capturing */ static const __u16 spca501_open_data[][3] = { /* bmRequest,value,index */ {0x2, 0x50, 0x00}, /* C/S enable soft reset */ {0x2, 0x40, 0x00}, /* C/S disable soft reset */ {0x2, 0x02, 0x05}, /* C/S general purpose I/O data */ {0x2, 0x03, 0x05}, /* C/S general purpose I/O data */ #ifdef CCDSP_SET {0x1, 0x38, 0x01}, /* CCDSP options */ {0x1, 0x05, 0x02}, /* CCDSP Optical black level for user settings */ {0x1, 0xC0, 0x03}, /* CCDSP Optical black settings */ {0x1, 0x67, 0x07}, {0x1, 0x63, 0x3f}, /* CCDSP CCD gamma enable */ {0x1, 0x03, 0x56}, /* Add gamma correction */ {0x1, 0xFF, 0x15}, /* CCDSP High luminance for white balance */ {0x1, 0x01, 0x16}, /* CCDSP Low luminance for white balance */ /* Color correction and RGB-to-YUV transformation coefficients changing */ #ifdef ALTER_GAMA {0x0, 0x00, 0x08}, /* A11 */ {0x0, 0x00, 0x09}, /* A12 */ {0x0, 0x90, 0x0A}, /* A13 */ {0x0, 0x12, 0x0B}, /* A21 */ {0x0, 
0x00, 0x0C}, /* A22 */ {0x0, 0x00, 0x0D}, /* A23 */ {0x0, 0x00, 0x0E}, /* A31 */ {0x0, 0x02, 0x0F}, /* A32 */ {0x0, 0x00, 0x10}, /* A33 */ #else {0x1, 0x2a, 0x08}, /* A11 0x31 */ {0x1, 0xf8, 0x09}, /* A12 f8 */ {0x1, 0xf8, 0x0A}, /* A13 f8 */ {0x1, 0xf8, 0x0B}, /* A21 f8 */ {0x1, 0x14, 0x0C}, /* A22 0x14 */ {0x1, 0xf8, 0x0D}, /* A23 f8 */ {0x1, 0xf8, 0x0E}, /* A31 f8 */ {0x1, 0xf8, 0x0F}, /* A32 f8 */ {0x1, 0x20, 0x10}, /* A33 0x20 */ #endif {0x1, 0x00, 0x11}, /* R offset */ {0x1, 0x00, 0x12}, /* G offset */ {0x1, 0x00, 0x13}, /* B offset */ {0x1, 0x00, 0x14}, /* GB offset */ #endif #ifdef TG_SET /* Time generator manipulations */ {0x0, 0xfc, 0x0}, /* Set up high bits of shutter speed */ {0x0, 0x01, 0x1}, /* Set up low bits of shutter speed */ {0x0, 0xe4, 0x04}, /* DCLK*2 clock phase adjustment */ {0x0, 0x08, 0x05}, /* ADCK phase adjustment, inv. ext. VB */ {0x0, 0x03, 0x06}, /* FR phase adjustment */ {0x0, 0x01, 0x07}, /* FCDS phase adjustment */ {0x0, 0x39, 0x08}, /* FS phase adjustment */ {0x0, 0x88, 0x0a}, /* FH1 phase and delay adjustment */ {0x0, 0x03, 0x0f}, /* pixel identification */ {0x0, 0x00, 0x11}, /* clock source selection (default) */ /*VERY strange manipulations with * select DMCLP or OBPX to be ADCLP output (0x0C) * OPB always toggle or not (0x0D) but they allow * us to set up brightness */ {0x0, 0x01, 0x0c}, {0x0, 0xe0, 0x0d}, /* Done */ #endif #ifdef DSPWIN_SET {0x1, 0xa0, 0x01}, /* Setting image processing parameters */ {0x1, 0x1c, 0x17}, /* Changing Windows positions X1 */ {0x1, 0xe2, 0x19}, /* X2 */ {0x1, 0x1c, 0x1b}, /* X3 */ {0x1, 0xe2, 0x1d}, /* X4 */ {0x1, 0x5f, 0x1f}, /* X5 */ {0x1, 0x32, 0x20}, /* Y5 */ {0x1, 0x01, 0x10}, /* Changing A33 */ #endif {0x2, 0x204a, 0x07},/* Setting video compression & resolution 160x120 */ {0x2, 0x94, 0x06}, /* Setting video no compression */ {} }; /* The SPCAxxx docs from Sunplus document these values in tables, one table per register number. 
In the data below, dmRequest is the register number, index is the Addr, and value is a combination of Bit values. Bit Value (hex) 0 01 1 02 2 04 3 08 4 10 5 20 6 40 7 80 */ /* Data for chip initialization (set default values) */ static const __u16 spca501_init_data[][3] = { /* Set all the values to powerup defaults */ /* bmRequest,value,index */ {0x0, 0xAA, 0x00}, {0x0, 0x02, 0x01}, {0x0, 0x01, 0x02}, {0x0, 0x02, 0x03}, {0x0, 0xCE, 0x04}, {0x0, 0x00, 0x05}, {0x0, 0x00, 0x06}, {0x0, 0x00, 0x07}, {0x0, 0x00, 0x08}, {0x0, 0x00, 0x09}, {0x0, 0x90, 0x0A}, {0x0, 0x12, 0x0B}, {0x0, 0x00, 0x0C}, {0x0, 0x00, 0x0D}, {0x0, 0x00, 0x0E}, {0x0, 0x02, 0x0F}, {0x0, 0x00, 0x10}, {0x0, 0x00, 0x11}, {0x0, 0x00, 0x12}, {0x0, 0x00, 0x13}, {0x0, 0x00, 0x14}, {0x0, 0x00, 0x15}, {0x0, 0x00, 0x16}, {0x0, 0x00, 0x17}, {0x0, 0x00, 0x18}, {0x0, 0x00, 0x19}, {0x0, 0x00, 0x1A}, {0x0, 0x00, 0x1B}, {0x0, 0x00, 0x1C}, {0x0, 0x00, 0x1D}, {0x0, 0x00, 0x1E}, {0x0, 0x00, 0x1F}, {0x0, 0x00, 0x20}, {0x0, 0x00, 0x21}, {0x0, 0x00, 0x22}, {0x0, 0x00, 0x23}, {0x0, 0x00, 0x24}, {0x0, 0x00, 0x25}, {0x0, 0x00, 0x26}, {0x0, 0x00, 0x27}, {0x0, 0x00, 0x28}, {0x0, 0x00, 0x29}, {0x0, 0x00, 0x2A}, {0x0, 0x00, 0x2B}, {0x0, 0x00, 0x2C}, {0x0, 0x00, 0x2D}, {0x0, 0x00, 0x2E}, {0x0, 0x00, 0x2F}, {0x0, 0x00, 0x30}, {0x0, 0x00, 0x31}, {0x0, 0x00, 0x32}, {0x0, 0x00, 0x33}, {0x0, 0x00, 0x34}, {0x0, 0x00, 0x35}, {0x0, 0x00, 0x36}, {0x0, 0x00, 0x37}, {0x0, 0x00, 0x38}, {0x0, 0x00, 0x39}, {0x0, 0x00, 0x3A}, {0x0, 0x00, 0x3B}, {0x0, 0x00, 0x3C}, {0x0, 0x00, 0x3D}, {0x0, 0x00, 0x3E}, {0x0, 0x00, 0x3F}, {0x0, 0x00, 0x40}, {0x0, 0x00, 0x41}, {0x0, 0x00, 0x42}, {0x0, 0x00, 0x43}, {0x0, 0x00, 0x44}, {0x0, 0x00, 0x45}, {0x0, 0x00, 0x46}, {0x0, 0x00, 0x47}, {0x0, 0x00, 0x48}, {0x0, 0x00, 0x49}, {0x0, 0x00, 0x4A}, {0x0, 0x00, 0x4B}, {0x0, 0x00, 0x4C}, {0x0, 0x00, 0x4D}, {0x0, 0x00, 0x4E}, {0x0, 0x00, 0x4F}, {0x0, 0x00, 0x50}, {0x0, 0x00, 0x51}, {0x0, 0x00, 0x52}, {0x0, 0x00, 0x53}, {0x0, 0x00, 0x54}, {0x0, 0x00, 0x55}, {0x0, 0x00, 
0x56}, {0x0, 0x00, 0x57}, {0x0, 0x00, 0x58}, {0x0, 0x00, 0x59}, {0x0, 0x00, 0x5A}, {0x0, 0x00, 0x5B}, {0x0, 0x00, 0x5C}, {0x0, 0x00, 0x5D}, {0x0, 0x00, 0x5E}, {0x0, 0x00, 0x5F}, {0x0, 0x00, 0x60}, {0x0, 0x00, 0x61}, {0x0, 0x00, 0x62}, {0x0, 0x00, 0x63}, {0x0, 0x00, 0x64}, {0x0, 0x00, 0x65}, {0x0, 0x00, 0x66}, {0x0, 0x00, 0x67}, {0x0, 0x00, 0x68}, {0x0, 0x00, 0x69}, {0x0, 0x00, 0x6A}, {0x0, 0x00, 0x6B}, {0x0, 0x00, 0x6C}, {0x0, 0x00, 0x6D}, {0x0, 0x00, 0x6E}, {0x0, 0x00, 0x6F}, {0x0, 0x00, 0x70}, {0x0, 0x00, 0x71}, {0x0, 0x00, 0x72}, {0x0, 0x00, 0x73}, {0x0, 0x00, 0x74}, {0x0, 0x00, 0x75}, {0x0, 0x00, 0x76}, {0x0, 0x00, 0x77}, {0x0, 0x00, 0x78}, {0x0, 0x00, 0x79}, {0x0, 0x00, 0x7A}, {0x0, 0x00, 0x7B}, {0x0, 0x00, 0x7C}, {0x0, 0x00, 0x7D}, {0x0, 0x00, 0x7E}, {0x0, 0x00, 0x7F}, {0x0, 0x00, 0x80}, {0x0, 0x00, 0x81}, {0x0, 0x00, 0x82}, {0x0, 0x00, 0x83}, {0x0, 0x00, 0x84}, {0x0, 0x00, 0x85}, {0x0, 0x00, 0x86}, {0x0, 0x00, 0x87}, {0x0, 0x00, 0x88}, {0x0, 0x00, 0x89}, {0x0, 0x00, 0x8A}, {0x0, 0x00, 0x8B}, {0x0, 0x00, 0x8C}, {0x0, 0x00, 0x8D}, {0x0, 0x00, 0x8E}, {0x0, 0x00, 0x8F}, {0x0, 0x00, 0x90}, {0x0, 0x00, 0x91}, {0x0, 0x00, 0x92}, {0x0, 0x00, 0x93}, {0x0, 0x00, 0x94}, {0x0, 0x00, 0x95}, {0x0, 0x00, 0x96}, {0x0, 0x00, 0x97}, {0x0, 0x00, 0x98}, {0x0, 0x00, 0x99}, {0x0, 0x00, 0x9A}, {0x0, 0x00, 0x9B}, {0x0, 0x00, 0x9C}, {0x0, 0x00, 0x9D}, {0x0, 0x00, 0x9E}, {0x0, 0x00, 0x9F}, {0x0, 0x00, 0xA0}, {0x0, 0x00, 0xA1}, {0x0, 0x00, 0xA2}, {0x0, 0x00, 0xA3}, {0x0, 0x00, 0xA4}, {0x0, 0x00, 0xA5}, {0x0, 0x00, 0xA6}, {0x0, 0x00, 0xA7}, {0x0, 0x00, 0xA8}, {0x0, 0x00, 0xA9}, {0x0, 0x00, 0xAA}, {0x0, 0x00, 0xAB}, {0x0, 0x00, 0xAC}, {0x0, 0x00, 0xAD}, {0x0, 0x00, 0xAE}, {0x0, 0x00, 0xAF}, {0x0, 0x00, 0xB0}, {0x0, 0x00, 0xB1}, {0x0, 0x00, 0xB2}, {0x0, 0x00, 0xB3}, {0x0, 0x00, 0xB4}, {0x0, 0x00, 0xB5}, {0x0, 0x00, 0xB6}, {0x0, 0x00, 0xB7}, {0x0, 0x00, 0xB8}, {0x0, 0x00, 0xB9}, {0x0, 0x00, 0xBA}, {0x0, 0x00, 0xBB}, {0x0, 0x00, 0xBC}, {0x0, 0x00, 0xBD}, {0x0, 0x00, 0xBE}, {0x0, 0x00, 
0xBF}, {0x0, 0x00, 0xC0}, {0x0, 0x00, 0xC1}, {0x0, 0x00, 0xC2}, {0x0, 0x00, 0xC3}, {0x0, 0x00, 0xC4}, {0x0, 0x00, 0xC5}, {0x0, 0x00, 0xC6}, {0x0, 0x00, 0xC7}, {0x0, 0x00, 0xC8}, {0x0, 0x00, 0xC9}, {0x0, 0x00, 0xCA}, {0x0, 0x00, 0xCB}, {0x0, 0x00, 0xCC}, {0x1, 0xF4, 0x00}, {0x1, 0x38, 0x01}, {0x1, 0x40, 0x02}, {0x1, 0x0A, 0x03}, {0x1, 0x40, 0x04}, {0x1, 0x40, 0x05}, {0x1, 0x40, 0x06}, {0x1, 0x67, 0x07}, {0x1, 0x31, 0x08}, {0x1, 0x00, 0x09}, {0x1, 0x00, 0x0A}, {0x1, 0x00, 0x0B}, {0x1, 0x14, 0x0C}, {0x1, 0x00, 0x0D}, {0x1, 0x00, 0x0E}, {0x1, 0x00, 0x0F}, {0x1, 0x1E, 0x10}, {0x1, 0x00, 0x11}, {0x1, 0x00, 0x12}, {0x1, 0x00, 0x13}, {0x1, 0x00, 0x14}, {0x1, 0xFF, 0x15}, {0x1, 0x01, 0x16}, {0x1, 0x32, 0x17}, {0x1, 0x23, 0x18}, {0x1, 0xCE, 0x19}, {0x1, 0x23, 0x1A}, {0x1, 0x32, 0x1B}, {0x1, 0x8D, 0x1C}, {0x1, 0xCE, 0x1D}, {0x1, 0x8D, 0x1E}, {0x1, 0x00, 0x1F}, {0x1, 0x00, 0x20}, {0x1, 0xFF, 0x3E}, {0x1, 0x02, 0x3F}, {0x1, 0x00, 0x40}, {0x1, 0x00, 0x41}, {0x1, 0x00, 0x42}, {0x1, 0x00, 0x43}, {0x1, 0x00, 0x44}, {0x1, 0x00, 0x45}, {0x1, 0x00, 0x46}, {0x1, 0x00, 0x47}, {0x1, 0x00, 0x48}, {0x1, 0x00, 0x49}, {0x1, 0x00, 0x4A}, {0x1, 0x00, 0x4B}, {0x1, 0x00, 0x4C}, {0x1, 0x00, 0x4D}, {0x1, 0x00, 0x4E}, {0x1, 0x00, 0x4F}, {0x1, 0x00, 0x50}, {0x1, 0x00, 0x51}, {0x1, 0x00, 0x52}, {0x1, 0x00, 0x53}, {0x1, 0x00, 0x54}, {0x1, 0x00, 0x55}, {0x1, 0x00, 0x56}, {0x1, 0x00, 0x57}, {0x1, 0x00, 0x58}, {0x1, 0x00, 0x59}, {0x1, 0x00, 0x5A}, {0x2, 0x03, 0x00}, {0x2, 0x00, 0x01}, {0x2, 0x00, 0x05}, {0x2, 0x00, 0x06}, {0x2, 0x00, 0x07}, {0x2, 0x00, 0x10}, {0x2, 0x00, 0x11}, /* Strange - looks like the 501 driver doesn't do anything * at insert time except read the EEPROM */ {} }; /* Data for video camera init before capture. * Capture and decoding by Colin Peart. * This is is for the 3com HomeConnect Lite which is spca501a based. 
*/ static const __u16 spca501_3com_open_data[][3] = { /* bmRequest,value,index */ {0x2, 0x0050, 0x0000}, /* C/S Enable TG soft reset, timing mode=010 */ {0x2, 0x0043, 0x0000}, /* C/S Disable TG soft reset, timing mode=010 */ {0x2, 0x0002, 0x0005}, /* C/S GPIO */ {0x2, 0x0003, 0x0005}, /* C/S GPIO */ #ifdef CCDSP_SET {0x1, 0x0020, 0x0001}, /* CCDSP Options */ {0x1, 0x0020, 0x0002}, /* CCDSP Black Level */ {0x1, 0x006e, 0x0007}, /* CCDSP Gamma options */ {0x1, 0x0090, 0x0015}, /* CCDSP Luminance Low */ {0x1, 0x00ff, 0x0016}, /* CCDSP Luminance High */ {0x1, 0x0003, 0x003F}, /* CCDSP Gamma correction toggle */ #ifdef ALTER_GAMMA {0x1, 0x0010, 0x0008}, /* CCDSP YUV A11 */ {0x1, 0x0000, 0x0009}, /* CCDSP YUV A12 */ {0x1, 0x0000, 0x000a}, /* CCDSP YUV A13 */ {0x1, 0x0000, 0x000b}, /* CCDSP YUV A21 */ {0x1, 0x0010, 0x000c}, /* CCDSP YUV A22 */ {0x1, 0x0000, 0x000d}, /* CCDSP YUV A23 */ {0x1, 0x0000, 0x000e}, /* CCDSP YUV A31 */ {0x1, 0x0000, 0x000f}, /* CCDSP YUV A32 */ {0x1, 0x0010, 0x0010}, /* CCDSP YUV A33 */ {0x1, 0x0000, 0x0011}, /* CCDSP R Offset */ {0x1, 0x0000, 0x0012}, /* CCDSP G Offset */ {0x1, 0x0001, 0x0013}, /* CCDSP B Offset */ {0x1, 0x0001, 0x0014}, /* CCDSP BG Offset */ {0x1, 0x003f, 0x00C1}, /* CCDSP Gamma Correction Enable */ #endif #endif #ifdef TG_SET {0x0, 0x00fc, 0x0000}, /* TG Shutter Speed High Bits */ {0x0, 0x0000, 0x0001}, /* TG Shutter Speed Low Bits */ {0x0, 0x00e4, 0x0004}, /* TG DCLK*2 Adjust */ {0x0, 0x0008, 0x0005}, /* TG ADCK Adjust */ {0x0, 0x0003, 0x0006}, /* TG FR Phase Adjust */ {0x0, 0x0001, 0x0007}, /* TG FCDS Phase Adjust */ {0x0, 0x0039, 0x0008}, /* TG FS Phase Adjust */ {0x0, 0x0088, 0x000a}, /* TG MH1 */ {0x0, 0x0003, 0x000f}, /* TG Pixel ID */ /* Like below, unexplained toglleing */ {0x0, 0x0080, 0x000c}, {0x0, 0x0000, 0x000d}, {0x0, 0x0080, 0x000c}, {0x0, 0x0004, 0x000d}, {0x0, 0x0000, 0x000c}, {0x0, 0x0000, 0x000d}, {0x0, 0x0040, 0x000c}, {0x0, 0x0017, 0x000d}, {0x0, 0x00c0, 0x000c}, {0x0, 0x0000, 0x000d}, {0x0, 0x0080, 
0x000c}, {0x0, 0x0006, 0x000d}, {0x0, 0x0080, 0x000c}, {0x0, 0x0004, 0x000d}, {0x0, 0x0002, 0x0003}, #endif #ifdef DSPWIN_SET {0x1, 0x001c, 0x0017}, /* CCDSP W1 Start X */ {0x1, 0x00e2, 0x0019}, /* CCDSP W2 Start X */ {0x1, 0x001c, 0x001b}, /* CCDSP W3 Start X */ {0x1, 0x00e2, 0x001d}, /* CCDSP W4 Start X */ {0x1, 0x00aa, 0x001f}, /* CCDSP W5 Start X */ {0x1, 0x0070, 0x0020}, /* CCDSP W5 Start Y */ #endif {0x0, 0x0001, 0x0010}, /* TG Start Clock */ /* {0x2, 0x006a, 0x0001}, * C/S Enable ISOSYNCH Packet Engine */ {0x2, 0x0068, 0x0001}, /* C/S Diable ISOSYNCH Packet Engine */ {0x2, 0x0000, 0x0005}, {0x2, 0x0043, 0x0000}, /* C/S Set Timing Mode, Disable TG soft reset */ {0x2, 0x0043, 0x0000}, /* C/S Set Timing Mode, Disable TG soft reset */ {0x2, 0x0002, 0x0005}, /* C/S GPIO */ {0x2, 0x0003, 0x0005}, /* C/S GPIO */ {0x2, 0x006a, 0x0001}, /* C/S Enable ISOSYNCH Packet Engine */ {} }; /* * Data used to initialize a SPCA501C with HV7131B sensor. * From a capture file taken with USBSnoop v 1.5 * I have a "SPCA501C pc camera chipset" manual by sunplus, but some * of the value meanings are obscure or simply "reserved". * to do list: * 1) Understand what every value means * 2) Understand why some values seem to appear more than once * 3) Write a small comment for each line of the following arrays. 
*/ static const __u16 spca501c_arowana_open_data[][3] = { /* bmRequest,value,index */ {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x01, 0x0006, 0x0011}, {0x01, 0x00ff, 0x0012}, {0x01, 0x0014, 0x0013}, {0x01, 0x0000, 0x0014}, {0x01, 0x0042, 0x0051}, {0x01, 0x0040, 0x0052}, {0x01, 0x0051, 0x0053}, {0x01, 0x0040, 0x0054}, {0x01, 0x0000, 0x0055}, {0x00, 0x0025, 0x0000}, {0x00, 0x0026, 0x0000}, {0x00, 0x0001, 0x0000}, {0x00, 0x0027, 0x0000}, {0x00, 0x008a, 0x0000}, {} }; static const __u16 spca501c_arowana_init_data[][3] = { /* bmRequest,value,index */ {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x01, 0x0006, 0x0011}, {0x01, 0x00ff, 0x0012}, {0x01, 0x0014, 0x0013}, {0x01, 0x0000, 0x0014}, {0x01, 0x0042, 0x0051}, {0x01, 0x0040, 0x0052}, {0x01, 0x0051, 0x0053}, {0x01, 0x0040, 0x0054}, {0x01, 0x0000, 0x0055}, {0x00, 0x0025, 0x0000}, {0x00, 0x0026, 0x0000}, {0x00, 0x0001, 0x0000}, {0x00, 0x0027, 0x0000}, {0x00, 0x008a, 0x0000}, {0x02, 0x0000, 0x0005}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, 
{0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 
0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 
0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0xfffd, 0x000a}, {0x01, 0x0023, 0x000b}, {0x01, 0xffea, 0x000c}, {0x01, 0xfff4, 0x000d}, {0x01, 0xfffc, 0x000e}, {0x01, 0xffe3, 0x000f}, {0x01, 0x001f, 0x0010}, {0x01, 0x00a8, 0x0001}, {0x01, 0x0067, 0x0007}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 
0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x00c8, 0x0015}, {0x01, 0x0032, 0x0016}, {0x01, 0x0000, 0x0011}, {0x01, 0x0000, 0x0012}, {0x01, 0x0000, 0x0013}, {0x01, 0x000a, 0x0003}, {0x02, 0xc002, 0x0001}, {0x02, 0x0007, 0x0005}, {0x02, 0xc000, 0x0001}, {0x02, 0x0000, 0x0005}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 
0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 
0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 
0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x000f, 0x0000}, {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0xfffd, 0x000a}, {0x01, 0x0023, 0x000b}, {0x01, 0xffea, 0x000c}, {0x01, 0xfff4, 0x000d}, {0x01, 0xfffc, 0x000e}, {0x01, 0xffe3, 0x000f}, {0x01, 0x001f, 0x0010}, {0x01, 0x00a8, 0x0001}, {0x01, 0x0067, 0x0007}, {0x01, 0x0042, 0x0051}, {0x01, 0x0051, 0x0053}, {0x01, 0x000a, 0x0003}, {0x02, 0xc002, 0x0001}, {0x02, 0x0007, 0x0005}, {0x02, 0xc000, 0x0001}, {0x02, 0x0000, 0x0005}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 
0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 
0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x000c, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0000, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 
0x0023}, {0x00, 0x0000, 0x0024}, {0x00, 0x00d5, 0x0025}, {0x00, 0x0000, 0x0026}, {0x00, 0x000b, 0x0027}, {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, {0xff, 0x0000, 0x00d0}, {0xff, 0x00d8, 0x00d1}, {0xff, 0x0000, 0x00d4}, {0xff, 0x0000, 0x00d5}, {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0x00fd, 0x000a}, {0x01, 0x0038, 0x000b}, {0x01, 0x00d1, 0x000c}, {0x01, 0x00f7, 0x000d}, {0x01, 0x00ed, 0x000e}, {0x01, 0x00d8, 0x000f}, {0x01, 0x0038, 0x0010}, {0x01, 0x00ff, 0x0015}, {0x01, 0x0001, 0x0016}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 0x0020}, {0x01, 0x00ff, 0x003e}, {0x01, 0x0003, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0001, 0x0056}, {0x01, 0x0060, 0x0057}, {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x100a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc002, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 
0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x001e, 0x0000}, {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x01, 0x0011, 0x0008}, {0x01, 0x0032, 0x0009}, {0x01, 0xfffd, 0x000a}, {0x01, 0x0023, 0x000b}, {0x01, 0xffea, 0x000c}, {0x01, 0xfff4, 0x000d}, {0x01, 0xfffc, 0x000e}, {0x01, 0xffe3, 0x000f}, {0x01, 0x001f, 0x0010}, {0x01, 0x00a8, 0x0001}, {0x01, 0x0067, 0x0007}, {0x01, 0x0042, 0x0051}, {0x01, 0x0051, 0x0053}, {0x01, 0x000a, 0x0003}, {0x02, 0xc002, 0x0001}, {0x02, 0x0007, 0x0005}, {0x01, 0x0042, 0x0051}, {0x01, 0x0051, 0x0053}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x002d, 0x0000}, {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x02, 0xc000, 0x0001}, {0x02, 0x0000, 0x0005}, {} }; /* Unknown camera from Ori Usbid 0x0000:0x0000 */ /* Based on snoops from Ori Cohen */ static const __u16 spca501c_mysterious_open_data[][3] = { {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, /* DSP Registers */ {0x01, 0x0016, 0x0011}, /* RGB offset */ {0x01, 0x0000, 0x0012}, {0x01, 0x0006, 0x0013}, {0x01, 0x0078, 0x0051}, {0x01, 0x0040, 0x0052}, {0x01, 0x0046, 0x0053}, {0x01, 0x0040, 0x0054}, {0x00, 0x0025, 0x0000}, /* {0x00, 0x0000, 0x0000 }, */ /* Part 2 */ /* TG Registers */ {0x00, 0x0026, 0x0000}, {0x00, 0x0001, 0x0000}, {0x00, 0x0027, 0x0000}, {0x00, 0x008a, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x2000, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0015, 0x0001}, {0x05, 0x00ea, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0023, 0x0001}, {0x05, 0x0003, 0x0000}, {0x05, 0x0030, 0x0001}, {0x05, 0x002b, 0x0000}, {0x05, 0x0031, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0032, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0033, 0x0001}, {0x05, 0x0023, 0x0000}, {0x05, 0x0034, 0x0001}, {0x05, 0x0002, 0x0000}, {0x05, 0x0050, 0x0001}, {0x05, 
0x0000, 0x0000}, {0x05, 0x0051, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0052, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0054, 0x0001}, {0x05, 0x0001, 0x0000}, {} }; /* Based on snoops from Ori Cohen */ static const __u16 spca501c_mysterious_init_data[][3] = { /* Part 3 */ /* TG registers */ /* {0x00, 0x0000, 0x0000}, */ {0x00, 0x0000, 0x0001}, {0x00, 0x0000, 0x0002}, {0x00, 0x0006, 0x0003}, {0x00, 0x0000, 0x0004}, {0x00, 0x0090, 0x0005}, {0x00, 0x0000, 0x0006}, {0x00, 0x0040, 0x0007}, {0x00, 0x00c0, 0x0008}, {0x00, 0x004a, 0x0009}, {0x00, 0x0000, 0x000a}, {0x00, 0x0000, 0x000b}, {0x00, 0x0001, 0x000c}, {0x00, 0x0001, 0x000d}, {0x00, 0x0000, 0x000e}, {0x00, 0x0002, 0x000f}, {0x00, 0x0001, 0x0010}, {0x00, 0x0000, 0x0011}, {0x00, 0x0001, 0x0012}, {0x00, 0x0002, 0x0020}, {0x00, 0x0080, 0x0021}, /* 640 */ {0x00, 0x0001, 0x0022}, {0x00, 0x00e0, 0x0023}, /* 480 */ {0x00, 0x0000, 0x0024}, /* Offset H hight */ {0x00, 0x00d3, 0x0025}, /* low */ {0x00, 0x0000, 0x0026}, /* Offset V */ {0x00, 0x000d, 0x0027}, /* low */ {0x00, 0x0000, 0x0046}, {0x00, 0x0000, 0x0047}, {0x00, 0x0000, 0x0048}, {0x00, 0x0000, 0x0049}, {0x00, 0x0008, 0x004a}, /* DSP Registers */ {0x01, 0x00a6, 0x0000}, {0x01, 0x0028, 0x0001}, {0x01, 0x0000, 0x0002}, {0x01, 0x000a, 0x0003}, /* Level Calc bit7 ->1 Auto */ {0x01, 0x0040, 0x0004}, {0x01, 0x0066, 0x0007}, {0x01, 0x000f, 0x0008}, /* A11 Color correction coeff */ {0x01, 0x002d, 0x0009}, /* A12 */ {0x01, 0x0005, 0x000a}, /* A13 */ {0x01, 0x0023, 0x000b}, /* A21 */ {0x01, 0x00e0, 0x000c}, /* A22 */ {0x01, 0x00fd, 0x000d}, /* A23 */ {0x01, 0x00f4, 0x000e}, /* A31 */ {0x01, 0x00e4, 0x000f}, /* A32 */ {0x01, 0x0028, 0x0010}, /* A33 */ {0x01, 0x00ff, 0x0015}, /* Reserved */ {0x01, 0x0001, 0x0016}, /* Reserved */ {0x01, 0x0032, 0x0017}, /* Win1 Start begin */ {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x0000, 0x001f}, {0x01, 0x0000, 
0x0020}, /* Win1 Start end */ {0x01, 0x00ff, 0x003e}, /* Reserved begin */ {0x01, 0x0002, 0x003f}, {0x01, 0x0000, 0x0040}, {0x01, 0x0035, 0x0041}, {0x01, 0x0053, 0x0042}, {0x01, 0x0069, 0x0043}, {0x01, 0x007c, 0x0044}, {0x01, 0x008c, 0x0045}, {0x01, 0x009a, 0x0046}, {0x01, 0x00a8, 0x0047}, {0x01, 0x00b4, 0x0048}, {0x01, 0x00bf, 0x0049}, {0x01, 0x00ca, 0x004a}, {0x01, 0x00d4, 0x004b}, {0x01, 0x00dd, 0x004c}, {0x01, 0x00e7, 0x004d}, {0x01, 0x00ef, 0x004e}, {0x01, 0x00f8, 0x004f}, {0x01, 0x00ff, 0x0050}, {0x01, 0x0003, 0x0056}, /* Reserved end */ {0x01, 0x0060, 0x0057}, /* Edge Gain */ {0x01, 0x0040, 0x0058}, {0x01, 0x0011, 0x0059}, /* Edge Bandwidth */ {0x01, 0x0001, 0x005a}, {0x02, 0x0007, 0x0005}, {0x02, 0xa048, 0x0000}, {0x02, 0x0007, 0x0005}, {0x02, 0x0015, 0x0006}, {0x02, 0x200a, 0x0007}, {0x02, 0xa048, 0x0000}, {0x02, 0xc000, 0x0001}, {0x02, 0x000f, 0x0005}, {0x02, 0xa048, 0x0000}, {0x05, 0x0022, 0x0004}, {0x05, 0x0025, 0x0001}, {0x05, 0x0000, 0x0000}, /* Part 4 */ {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0001, 0x0001}, {0x05, 0x0000, 0x0000}, {0x05, 0x0021, 0x0001}, {0x05, 0x00d2, 0x0000}, {0x05, 0x0020, 0x0001}, {0x05, 0x0000, 0x0000}, {0x00, 0x0090, 0x0005}, {0x01, 0x00a6, 0x0000}, {0x02, 0x0000, 0x0005}, {0x05, 0x0026, 0x0001}, {0x05, 0x0001, 0x0000}, {0x05, 0x0027, 0x0001}, {0x05, 0x004e, 0x0000}, /* Part 5 */ {0x01, 0x0003, 0x003f}, {0x01, 0x0001, 0x0056}, {0x01, 0x000f, 0x0008}, {0x01, 0x002d, 0x0009}, {0x01, 0x0005, 0x000a}, {0x01, 0x0023, 0x000b}, {0x01, 0xffe0, 0x000c}, {0x01, 0xfffd, 0x000d}, {0x01, 0xfff4, 0x000e}, {0x01, 0xffe4, 0x000f}, {0x01, 0x0028, 0x0010}, {0x01, 0x00a8, 0x0001}, {0x01, 0x0066, 0x0007}, {0x01, 0x0032, 0x0017}, {0x01, 0x0023, 0x0018}, {0x01, 0x00ce, 0x0019}, {0x01, 0x0023, 0x001a}, {0x01, 0x0032, 0x001b}, {0x01, 0x008d, 0x001c}, {0x01, 0x00ce, 0x001d}, {0x01, 0x008d, 0x001e}, {0x01, 0x00c8, 0x0015}, /* c8 Poids fort Luma */ {0x01, 0x0032, 0x0016}, /* 32 */ {0x01, 
	/* tail of spca501c_mysterious_init_data (table opens on an earlier line) */
	0x0016, 0x0011},	/* R 00 */
	{0x01, 0x0016, 0x0012},	/* G 00 */
	{0x01, 0x0016, 0x0013},	/* B 00 */
	{0x01, 0x000a, 0x0003},
	{0x02, 0xc002, 0x0001},
	{0x02, 0x0007, 0x0005},
	{}
};

/*
 * Write one camera register via a zero-length vendor control request.
 * @req:   selects the register bank (e.g. SPCA501_REG_CCDSP, SPCA50X_REG_USB)
 * @index: register number within that bank
 * @value: value to write
 * Returns the usb_control_msg() result (< 0 on error); 500 ms timeout.
 *
 * NOTE(review): the init tables store entries as {req, value, index}, while
 * this function takes (req, index, value) — write_vector() below swaps the
 * last two arguments accordingly.  Keep that in mind when adding entries.
 */
static int reg_write(struct usb_device *dev,
		     __u16 req, __u16 index, __u16 value)
{
	int ret;

	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			      req,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, index, NULL, 0, 500);
	PDEBUG(D_USBO, "reg write: 0x%02x 0x%02x 0x%02x",
	       req, index, value);
	if (ret < 0)
		pr_err("reg write: error %d\n", ret);
	return ret;
}

/*
 * Play back a register table of {req, value, index} triples.
 * The table is terminated by an all-zero entry; stops and returns the
 * negative reg_write() result on the first failed write, 0 on success.
 */
static int write_vector(struct gspca_dev *gspca_dev,
			const __u16 data[][3])
{
	struct usb_device *dev = gspca_dev->dev;
	int ret, i = 0;

	while (data[i][0] != 0 || data[i][1] != 0 || data[i][2] != 0) {
		/* table order is {req, value, index}; reg_write wants
		 * (req, index, value), hence the [2]/[1] swap */
		ret = reg_write(dev, data[i][0], data[i][2], data[i][1]);
		if (ret < 0) {
			PDEBUG(D_ERR,
			       "Reg write failed for 0x%02x,0x%02x,0x%02x",
			       data[i][0], data[i][1], data[i][2]);
			return ret;
		}
		i++;
	}
	return 0;
}

/* Push the cached brightness value to CCDSP register 0x12 */
static void setbrightness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x12, sd->brightness);
}

/* Push the cached 16-bit contrast value, high byte then low byte */
static void setcontrast(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_write(gspca_dev->dev, 0x00, 0x00, (sd->contrast >> 8) & 0xff);
	reg_write(gspca_dev->dev, 0x00, 0x01, sd->contrast & 0xff);
}

/* Push the cached color (saturation) value to CCDSP register 0x0c */
static void setcolors(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x0c, sd->colors);
}

/* Push the cached blue balance value to CCDSP register 0x11 */
static void setblue_balance(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x11, sd->blue_balance);
}

/* Push the cached red balance value to CCDSP register 0x13 */
static void setred_balance(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x13, sd->red_balance);
}

/*
 * This function is called at probe time.
 * Records the camera subtype from the USB id table, publishes the
 * supported frame modes and seeds the cached control values from the
 * control descriptors' defaults.  Always returns 0.
 */
static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam;

	cam = &gspca_dev->cam;
	cam->cam_mode = vga_mode;
	cam->nmodes = ARRAY_SIZE(vga_mode);
	sd->subtype = id->driver_info;
	sd->brightness = sd_ctrls[MY_BRIGHTNESS].qctrl.default_value;
	sd->contrast = sd_ctrls[MY_CONTRAST].qctrl.default_value;
	sd->colors = sd_ctrls[MY_COLOR].qctrl.default_value;
	return 0;
}

/*
 * This function is called at probe and resume time.
 * Downloads the per-subtype register init table; returns -EINVAL if
 * any register write in the table fails.
 */
static int sd_init(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->subtype) {
	case Arowana300KCMOSCamera:
	case SmileIntlCamera:
		/* Arowana 300k CMOS Camera data */
		if (write_vector(gspca_dev, spca501c_arowana_init_data))
			goto error;
		break;
	case MystFromOriUnknownCamera:
		/* Unknown Ori CMOS Camera data */
		if (write_vector(gspca_dev, spca501c_mysterious_open_data))
			goto error;
		break;
	default:
		/* generic spca501 init data */
		if (write_vector(gspca_dev, spca501_init_data))
			goto error;
		break;
	}
	PDEBUG(D_STREAM, "Initializing SPCA501 finished");
	return 0;
error:
	return -EINVAL;
}

/*
 * Start streaming: download the per-subtype open table, program the
 * resolution-dependent USB register, start the ISO packet machine and
 * re-apply the cached picture controls.
 * NOTE: write_vector() failures are deliberately ignored here (best
 * effort), unlike in sd_init().
 */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct usb_device *dev = gspca_dev->dev;
	int mode;

	switch (sd->subtype) {
	case ThreeComHomeConnectLite:
		/* Special handling for 3com data */
		write_vector(gspca_dev, spca501_3com_open_data);
		break;
	case Arowana300KCMOSCamera:
	case SmileIntlCamera:
		/* Arowana 300k CMOS Camera data */
		write_vector(gspca_dev, spca501c_arowana_open_data);
		break;
	case MystFromOriUnknownCamera:
		/* Unknown CMOS Camera data */
		write_vector(gspca_dev, spca501c_mysterious_init_data);
		break;
	default:
		/* Generic 501 open data */
		write_vector(gspca_dev, spca501_open_data);
	}

	/* memorize the wanted pixel format */
	mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;

	/* Enable ISO packet machine CTRL reg=2,
	 * index=1 bitmask=0x2 (bit ordinal 1) */
	reg_write(dev, SPCA50X_REG_USB, 0x6, 0x94);
	switch (mode) {
	case 0:		/* 640x480 */
		reg_write(dev, SPCA50X_REG_USB, 0x07, 0x004a);
		break;
	case 1:		/* 320x240 */
		reg_write(dev, SPCA50X_REG_USB, 0x07, 0x104a);
		break;
	default:
/*	case 2:		 * 160x120 */
		reg_write(dev, SPCA50X_REG_USB, 0x07, 0x204a);
		break;
	}
	reg_write(dev, SPCA501_REG_CTLRL, 0x01, 0x02);

	/* HDG at least the Intel CreateAndShare needs to have one of its
	 * brightness / contrast / color set otherwise it assumes what seems
	 * max contrast. Note that strange enough setting any of these is
	 * enough to fix the max contrast problem, to be sure we set all 3 */
	setbrightness(gspca_dev);
	setcontrast(gspca_dev);
	setcolors(gspca_dev);
	return 0;
}

static void sd_stopN(struct gspca_dev *gspca_dev)
{
	/* Disable ISO packet
	 * machine CTRL reg=2, index=1 bitmask=0x0 (bit ordinal 1) */
	reg_write(gspca_dev->dev, SPCA501_REG_CTLRL, 0x01, 0x00);
}

/* called on streamoff with alt 0 and on disconnect */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
	/* skip the register write once the device is gone */
	if (!gspca_dev->present)
		return;
	reg_write(gspca_dev->dev, SPCA501_REG_CTLRL, 0x05, 0x00);
}

/*
 * Demultiplex one isochronous packet into the current frame.
 * Byte 0 of each packet is a marker: 0 starts a new frame (the
 * SPCA501_OFFSET_DATA header bytes are stripped), 0xff marks a packet
 * to drop, anything else is frame payload after a 1-byte header.
 */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,		/* isoc packet */
			int len)		/* iso packet length */
{
	switch (data[0]) {
	case 0:				/* start of frame */
		gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
		data += SPCA501_OFFSET_DATA;
		len -= SPCA501_OFFSET_DATA;
		gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
		return;
	case 0xff:			/* drop */
/*		gspca_dev->last_packet_type = DISCARD_PACKET; */
		return;
	}
	data++;
	len--;
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}

/* V4L2 control set/get wrappers: cache the value in struct sd and,
 * when streaming, push it to the hardware immediately. */

static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->brightness = val;
	if (gspca_dev->streaming)
		setbrightness(gspca_dev);
	return 0;
}

static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->brightness;
	return 0;
}

static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->contrast = val;
	if (gspca_dev->streaming)
		setcontrast(gspca_dev);
	return 0;
}

static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->contrast;
	return 0;
}

static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->colors = val;
	if (gspca_dev->streaming)
		setcolors(gspca_dev);
	return 0;
}

static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->colors;
	return 0;
}

static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->blue_balance = val;
	if (gspca_dev->streaming)
		setblue_balance(gspca_dev);
	return 0;
}

static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->blue_balance;
	return 0;
}

static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->red_balance = val;
	if (gspca_dev->streaming)
		setred_balance(gspca_dev);
	return 0;
}

static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->red_balance;
	return 0;
}

/* sub-driver description */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.ctrls = sd_ctrls,
	.nctrls = ARRAY_SIZE(sd_ctrls),
	.config = sd_config,
	.init = sd_init,
	.start = sd_start,
	.stopN = sd_stopN,
	.stop0 = sd_stop0,
	.pkt_scan = sd_pkt_scan,
};

/* -- module initialisation -- */
/* driver_info carries the camera subtype consumed by sd_config() */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x040a, 0x0002), .driver_info = KodakDVC325},
	{USB_DEVICE(0x0497, 0xc001), .driver_info = SmileIntlCamera},
	{USB_DEVICE(0x0506, 0x00df), .driver_info = ThreeComHomeConnectLite},
	{USB_DEVICE(0x0733, 0x0401), .driver_info = IntelCreateAndShare},
	{USB_DEVICE(0x0733, 0x0402), .driver_info = ViewQuestM318B},
	{USB_DEVICE(0x1776, 0x501c), .driver_info = Arowana300KCMOSCamera},
	{USB_DEVICE(0x0000, 0x0000), .driver_info = MystFromOriUnknownCamera},
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);

/* -- device connect -- */
static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
desalesouche/android_kernel_huawei_honor_3.4
drivers/media/video/gspca/jeilinj.c
4956
14415
/* * Jeilinj subdriver * * Supports some Jeilin dual-mode cameras which use bulk transport and * download raw JPEG data. * * Copyright (C) 2009 Theodore Kilgore * * Sportscam DV15 support and control settings are * Copyright (C) 2011 Patrice Chotard * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "jeilinj" #include <linux/slab.h> #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/JEILINJ USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define JEILINJ_CMD_TIMEOUT 500 #define JEILINJ_CMD_DELAY 160 #define JEILINJ_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define JEILINJ_MAX_TRANSFER 0x200 #define FRAME_HEADER_LEN 0x10 #define FRAME_START 0xFFFFFFFF enum { SAKAR_57379, SPORTSCAM_DV15, }; #define CAMQUALITY_MIN 0 /* highest cam quality */ #define CAMQUALITY_MAX 97 /* lowest cam quality */ enum e_ctrl { LIGHTFREQ, AUTOGAIN, RED, GREEN, BLUE, NCTRLS /* number of controls */ }; /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct gspca_ctrl ctrls[NCTRLS]; int blocks_left; const struct v4l2_pix_format *cap_mode; /* Driver stuff */ u8 type; u8 quality; /* image quality */ #define QUALITY_MIN 35 #define QUALITY_MAX 85 #define QUALITY_DEF 85 u8 jpeg_hdr[JPEG_HDR_SZ]; }; struct jlj_command { unsigned char instruction[2]; unsigned char ack_wanted; unsigned char delay; }; /* AFAICT these cameras will only do 320x240. */ static struct v4l2_pix_format jlj_mode[] = { { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, { 640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0} }; /* * cam uses endpoint 0x03 to send commands, 0x84 for read commands, * and 0x82 for bulk transfer. */ /* All commands are two bytes only */ static void jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command) { int retval; if (gspca_dev->usb_err < 0) return; memcpy(gspca_dev->usb_buf, command, 2); retval = usb_bulk_msg(gspca_dev->dev, usb_sndbulkpipe(gspca_dev->dev, 3), gspca_dev->usb_buf, 2, NULL, 500); if (retval < 0) { pr_err("command write [%02x] error %d\n", gspca_dev->usb_buf[0], retval); gspca_dev->usb_err = retval; } } /* Responses are one byte only */ static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response) { int retval; if (gspca_dev->usb_err < 0) return; retval = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x84), gspca_dev->usb_buf, 1, NULL, 500); response = gspca_dev->usb_buf[0]; if (retval < 0) { pr_err("read command [%02x] error %d\n", gspca_dev->usb_buf[0], retval); gspca_dev->usb_err = retval; } } static void setfreq(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 freq_commands[][2] = { {0x71, 0x80}, {0x70, 0x07} }; freq_commands[0][1] |= (sd->ctrls[LIGHTFREQ].val >> 1); jlj_write2(gspca_dev, freq_commands[0]); jlj_write2(gspca_dev, 
freq_commands[1]); } static void setcamquality(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 quality_commands[][2] = { {0x71, 0x1E}, {0x70, 0x06} }; u8 camquality; /* adapt camera quality from jpeg quality */ camquality = ((QUALITY_MAX - sd->quality) * CAMQUALITY_MAX) / (QUALITY_MAX - QUALITY_MIN); quality_commands[0][1] += camquality; jlj_write2(gspca_dev, quality_commands[0]); jlj_write2(gspca_dev, quality_commands[1]); } static void setautogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 autogain_commands[][2] = { {0x94, 0x02}, {0xcf, 0x00} }; autogain_commands[1][1] = (sd->ctrls[AUTOGAIN].val << 4); jlj_write2(gspca_dev, autogain_commands[0]); jlj_write2(gspca_dev, autogain_commands[1]); } static void setred(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 setred_commands[][2] = { {0x94, 0x02}, {0xe6, 0x00} }; setred_commands[1][1] = sd->ctrls[RED].val; jlj_write2(gspca_dev, setred_commands[0]); jlj_write2(gspca_dev, setred_commands[1]); } static void setgreen(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 setgreen_commands[][2] = { {0x94, 0x02}, {0xe7, 0x00} }; setgreen_commands[1][1] = sd->ctrls[GREEN].val; jlj_write2(gspca_dev, setgreen_commands[0]); jlj_write2(gspca_dev, setgreen_commands[1]); } static void setblue(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 setblue_commands[][2] = { {0x94, 0x02}, {0xe9, 0x00} }; setblue_commands[1][1] = sd->ctrls[BLUE].val; jlj_write2(gspca_dev, setblue_commands[0]); jlj_write2(gspca_dev, setblue_commands[1]); } static const struct ctrl sd_ctrls[NCTRLS] = { [LIGHTFREQ] = { { .id = V4L2_CID_POWER_LINE_FREQUENCY, .type = V4L2_CTRL_TYPE_MENU, .name = "Light frequency filter", .minimum = V4L2_CID_POWER_LINE_FREQUENCY_DISABLED, /* 1 */ .maximum = V4L2_CID_POWER_LINE_FREQUENCY_60HZ, /* 2 */ .step = 1, .default_value = V4L2_CID_POWER_LINE_FREQUENCY_60HZ, }, .set_control = setfreq }, 
[AUTOGAIN] = { { .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Automatic Gain (and Exposure)", .minimum = 0, .maximum = 3, .step = 1, #define AUTOGAIN_DEF 0 .default_value = AUTOGAIN_DEF, }, .set_control = setautogain }, [RED] = { { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0, .maximum = 3, .step = 1, #define RED_BALANCE_DEF 2 .default_value = RED_BALANCE_DEF, }, .set_control = setred }, [GREEN] = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0, .maximum = 3, .step = 1, #define GREEN_BALANCE_DEF 2 .default_value = GREEN_BALANCE_DEF, }, .set_control = setgreen }, [BLUE] = { { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0, .maximum = 3, .step = 1, #define BLUE_BALANCE_DEF 2 .default_value = BLUE_BALANCE_DEF, }, .set_control = setblue }, }; static int jlj_start(struct gspca_dev *gspca_dev) { int i; int start_commands_size; u8 response = 0xff; struct sd *sd = (struct sd *) gspca_dev; struct jlj_command start_commands[] = { {{0x71, 0x81}, 0, 0}, {{0x70, 0x05}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, {{0x71, 0x81 - gspca_dev->curr_mode}, 0, 0}, {{0x70, 0x04}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, {{0x71, 0x00}, 0, 0}, /* start streaming ??*/ {{0x70, 0x08}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, #define SPORTSCAM_DV15_CMD_SIZE 9 {{0x94, 0x02}, 0, 0}, {{0xde, 0x24}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xdd, 0xf0}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe3, 0x2c}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe4, 0x00}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe5, 0x00}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe6, 0x2c}, 0, 0}, {{0x94, 0x03}, 0, 0}, {{0xaa, 0x00}, 0, 0} }; sd->blocks_left = 0; /* Under Windows, USB spy shows that only the 9 first start * commands are used for SPORTSCAM_DV15 webcam */ if (sd->type == SPORTSCAM_DV15) start_commands_size = SPORTSCAM_DV15_CMD_SIZE; else start_commands_size = 
ARRAY_SIZE(start_commands); for (i = 0; i < start_commands_size; i++) { jlj_write2(gspca_dev, start_commands[i].instruction); if (start_commands[i].delay) msleep(start_commands[i].delay); if (start_commands[i].ack_wanted) jlj_read1(gspca_dev, response); } setcamquality(gspca_dev); msleep(2); setfreq(gspca_dev); if (gspca_dev->usb_err < 0) PDEBUG(D_ERR, "Start streaming command failed"); return gspca_dev->usb_err; } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; int packet_type; u32 header_marker; PDEBUG(D_STREAM, "Got %d bytes out of %d for Block 0", len, JEILINJ_MAX_TRANSFER); if (len != JEILINJ_MAX_TRANSFER) { PDEBUG(D_PACK, "bad length"); goto discard; } /* check if it's start of frame */ header_marker = ((u32 *)data)[0]; if (header_marker == FRAME_START) { sd->blocks_left = data[0x0a] - 1; PDEBUG(D_STREAM, "blocks_left = 0x%x", sd->blocks_left); /* Start a new frame, and add the JPEG header, first thing */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); /* Toss line 0 of data block 0, keep the rest. */ gspca_frame_add(gspca_dev, INTER_PACKET, data + FRAME_HEADER_LEN, JEILINJ_MAX_TRANSFER - FRAME_HEADER_LEN); } else if (sd->blocks_left > 0) { PDEBUG(D_STREAM, "%d blocks remaining for frame", sd->blocks_left); sd->blocks_left -= 1; if (sd->blocks_left == 0) packet_type = LAST_PACKET; else packet_type = INTER_PACKET; gspca_frame_add(gspca_dev, packet_type, data, JEILINJ_MAX_TRANSFER); } else goto discard; return; discard: /* Discard data until a new frame starts. 
*/ gspca_dev->last_packet_type = DISCARD_PACKET; } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; dev->type = id->driver_info; gspca_dev->cam.ctrls = dev->ctrls; dev->quality = QUALITY_DEF; cam->cam_mode = jlj_mode; cam->nmodes = ARRAY_SIZE(jlj_mode); cam->bulk = 1; cam->bulk_nurbs = 1; cam->bulk_size = JEILINJ_MAX_TRANSFER; return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { int i; u8 *buf; static u8 stop_commands[][2] = { {0x71, 0x00}, {0x70, 0x09}, {0x71, 0x80}, {0x70, 0x05} }; for (;;) { /* get the image remaining blocks */ usb_bulk_msg(gspca_dev->dev, gspca_dev->urb[0]->pipe, gspca_dev->urb[0]->transfer_buffer, JEILINJ_MAX_TRANSFER, NULL, JEILINJ_DATA_TIMEOUT); /* search for 0xff 0xd9 (EOF for JPEG) */ i = 0; buf = gspca_dev->urb[0]->transfer_buffer; while ((i < (JEILINJ_MAX_TRANSFER - 1)) && ((buf[i] != 0xff) || (buf[i+1] != 0xd9))) i++; if (i != (JEILINJ_MAX_TRANSFER - 1)) /* last remaining block found */ break; } for (i = 0; i < ARRAY_SIZE(stop_commands); i++) jlj_write2(gspca_dev, stop_commands[i]); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return gspca_dev->usb_err; } /* Set up for getting frames. 
*/ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* create the JPEG header */ jpeg_define(dev->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(dev->jpeg_hdr, dev->quality); PDEBUG(D_STREAM, "Start streaming at %dx%d", gspca_dev->height, gspca_dev->width); jlj_start(gspca_dev); return gspca_dev->usb_err; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0280), .driver_info = SAKAR_57379}, {USB_DEVICE(0x0979, 0x0270), .driver_info = SPORTSCAM_DV15}, {} }; MODULE_DEVICE_TABLE(usb, device_table); static int sd_querymenu(struct gspca_dev *gspca_dev, struct v4l2_querymenu *menu) { switch (menu->id) { case V4L2_CID_POWER_LINE_FREQUENCY: switch (menu->index) { case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ strcpy((char *) menu->name, "disable"); return 0; case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ strcpy((char *) menu->name, "50 Hz"); return 0; case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ strcpy((char *) menu->name, "60 Hz"); return 0; } break; } return -EINVAL; } static int sd_set_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (jcomp->quality < QUALITY_MIN) sd->quality = QUALITY_MIN; else if (jcomp->quality > QUALITY_MAX) sd->quality = QUALITY_MAX; else sd->quality = jcomp->quality; if (gspca_dev->streaming) { jpeg_set_qual(sd->jpeg_hdr, sd->quality); setcamquality(gspca_dev); } return 0; } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = sd->quality; jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } /* sub-driver description */ static const struct sd_desc sd_desc_sakar_57379 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan 
= sd_pkt_scan, }; /* sub-driver description */ static const struct sd_desc sd_desc_sportscam_dv15 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .querymenu = sd_querymenu, .get_jcomp = sd_get_jcomp, .set_jcomp = sd_set_jcomp, }; static const struct sd_desc *sd_desc[2] = { &sd_desc_sakar_57379, &sd_desc_sportscam_dv15 }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, sd_desc[id->driver_info], sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
hobit26/h3_linux
drivers/media/video/gspca/spca500.c
4956
29898
/* * SPCA500 chip based cameras initialization data * * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "spca500" #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>"); MODULE_DESCRIPTION("GSPCA/SPCA500 USB Camera Driver"); MODULE_LICENSE("GPL"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ unsigned char brightness; unsigned char contrast; unsigned char colors; u8 quality; #define QUALITY_MIN 70 #define QUALITY_MAX 95 #define QUALITY_DEF 85 char subtype; #define AgfaCl20 0 #define AiptekPocketDV 1 #define BenqDC1016 2 #define CreativePCCam300 3 #define DLinkDSC350 4 #define Gsmartmini 5 #define IntelPocketPCCamera 6 #define KodakEZ200 7 #define LogitechClickSmart310 8 #define LogitechClickSmart510 9 #define LogitechTraveler 10 #define MustekGsmart300 11 #define Optimedia 12 #define PalmPixDC85 13 #define ToptroIndus 14 u8 jpeg_hdr[JPEG_HDR_SZ]; }; /* V4L2 controls supported by the driver */ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val); static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); static const struct ctrl sd_ctrls[] = { { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 255, .step = 1, #define BRIGHTNESS_DEF 127 .default_value = BRIGHTNESS_DEF, }, .set = sd_setbrightness, .get = sd_getbrightness, }, { { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, .maximum = 63, .step = 1, #define CONTRAST_DEF 31 .default_value = CONTRAST_DEF, }, .set = sd_setcontrast, .get = sd_getcontrast, }, { { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Color", .minimum = 0, .maximum = 63, .step = 1, #define COLOR_DEF 31 .default_value = COLOR_DEF, }, .set = sd_setcolors, .get = sd_getcolors, }, }; static const struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {640, 480, 
V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; static const struct v4l2_pix_format sif_mode[] = { {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; /* Frame packet header offsets for the spca500 */ #define SPCA500_OFFSET_PADDINGLB 2 #define SPCA500_OFFSET_PADDINGHB 3 #define SPCA500_OFFSET_MODE 4 #define SPCA500_OFFSET_IMGWIDTH 5 #define SPCA500_OFFSET_IMGHEIGHT 6 #define SPCA500_OFFSET_IMGMODE 7 #define SPCA500_OFFSET_QTBLINDEX 8 #define SPCA500_OFFSET_FRAMSEQ 9 #define SPCA500_OFFSET_CDSPINFO 10 #define SPCA500_OFFSET_GPIO 11 #define SPCA500_OFFSET_AUGPIO 12 #define SPCA500_OFFSET_DATA 16 static const __u16 spca500_visual_defaults[][3] = { {0x00, 0x0003, 0x816b}, /* SSI not active sync with vsync, * hue (H byte) = 0, * saturation/hue enable, * brightness/contrast enable. */ {0x00, 0x0000, 0x8167}, /* brightness = 0 */ {0x00, 0x0020, 0x8168}, /* contrast = 0 */ {0x00, 0x0003, 0x816b}, /* SSI not active sync with vsync, * hue (H byte) = 0, saturation/hue enable, * brightness/contrast enable. * was 0x0003, now 0x0000. 
*/ {0x00, 0x0000, 0x816a}, /* hue (L byte) = 0 */ {0x00, 0x0020, 0x8169}, /* saturation = 0x20 */ {0x00, 0x0050, 0x8157}, /* edge gain high threshold */ {0x00, 0x0030, 0x8158}, /* edge gain low threshold */ {0x00, 0x0028, 0x8159}, /* edge bandwidth high threshold */ {0x00, 0x000a, 0x815a}, /* edge bandwidth low threshold */ {0x00, 0x0001, 0x8202}, /* clock rate compensation = 1/25 sec/frame */ {0x0c, 0x0004, 0x0000}, /* set interface */ {} }; static const __u16 Clicksmart510_defaults[][3] = { {0x00, 0x00, 0x8211}, {0x00, 0x01, 0x82c0}, {0x00, 0x10, 0x82cb}, {0x00, 0x0f, 0x800d}, {0x00, 0x82, 0x8225}, {0x00, 0x21, 0x8228}, {0x00, 0x00, 0x8203}, {0x00, 0x00, 0x8204}, {0x00, 0x08, 0x8205}, {0x00, 0xf8, 0x8206}, {0x00, 0x28, 0x8207}, {0x00, 0xa0, 0x8208}, {0x00, 0x08, 0x824a}, {0x00, 0x08, 0x8214}, {0x00, 0x80, 0x82c1}, {0x00, 0x00, 0x82c2}, {0x00, 0x00, 0x82ca}, {0x00, 0x80, 0x82c1}, {0x00, 0x04, 0x82c2}, {0x00, 0x00, 0x82ca}, {0x00, 0xfc, 0x8100}, {0x00, 0xfc, 0x8105}, {0x00, 0x30, 0x8101}, {0x00, 0x00, 0x8102}, {0x00, 0x00, 0x8103}, {0x00, 0x66, 0x8107}, {0x00, 0x00, 0x816b}, {0x00, 0x00, 0x8155}, {0x00, 0x01, 0x8156}, {0x00, 0x60, 0x8157}, {0x00, 0x40, 0x8158}, {0x00, 0x0a, 0x8159}, {0x00, 0x06, 0x815a}, {0x00, 0x00, 0x813f}, {0x00, 0x00, 0x8200}, {0x00, 0x19, 0x8201}, {0x00, 0x00, 0x82c1}, {0x00, 0xa0, 0x82c2}, {0x00, 0x00, 0x82ca}, {0x00, 0x00, 0x8117}, {0x00, 0x00, 0x8118}, {0x00, 0x65, 0x8119}, {0x00, 0x00, 0x811a}, {0x00, 0x00, 0x811b}, {0x00, 0x55, 0x811c}, {0x00, 0x65, 0x811d}, {0x00, 0x55, 0x811e}, {0x00, 0x16, 0x811f}, {0x00, 0x19, 0x8120}, {0x00, 0x80, 0x8103}, {0x00, 0x83, 0x816b}, {0x00, 0x25, 0x8168}, {0x00, 0x01, 0x820f}, {0x00, 0xff, 0x8115}, {0x00, 0x48, 0x8116}, {0x00, 0x50, 0x8151}, {0x00, 0x40, 0x8152}, {0x00, 0x78, 0x8153}, {0x00, 0x40, 0x8154}, {0x00, 0x00, 0x8167}, {0x00, 0x20, 0x8168}, {0x00, 0x00, 0x816a}, {0x00, 0x03, 0x816b}, {0x00, 0x20, 0x8169}, {0x00, 0x60, 0x8157}, {0x00, 0x00, 0x8190}, {0x00, 0x00, 0x81a1}, {0x00, 0x00, 0x81b2}, 
{0x00, 0x27, 0x8191}, {0x00, 0x27, 0x81a2}, {0x00, 0x27, 0x81b3}, {0x00, 0x4b, 0x8192}, {0x00, 0x4b, 0x81a3}, {0x00, 0x4b, 0x81b4}, {0x00, 0x66, 0x8193}, {0x00, 0x66, 0x81a4}, {0x00, 0x66, 0x81b5}, {0x00, 0x79, 0x8194}, {0x00, 0x79, 0x81a5}, {0x00, 0x79, 0x81b6}, {0x00, 0x8a, 0x8195}, {0x00, 0x8a, 0x81a6}, {0x00, 0x8a, 0x81b7}, {0x00, 0x9b, 0x8196}, {0x00, 0x9b, 0x81a7}, {0x00, 0x9b, 0x81b8}, {0x00, 0xa6, 0x8197}, {0x00, 0xa6, 0x81a8}, {0x00, 0xa6, 0x81b9}, {0x00, 0xb2, 0x8198}, {0x00, 0xb2, 0x81a9}, {0x00, 0xb2, 0x81ba}, {0x00, 0xbe, 0x8199}, {0x00, 0xbe, 0x81aa}, {0x00, 0xbe, 0x81bb}, {0x00, 0xc8, 0x819a}, {0x00, 0xc8, 0x81ab}, {0x00, 0xc8, 0x81bc}, {0x00, 0xd2, 0x819b}, {0x00, 0xd2, 0x81ac}, {0x00, 0xd2, 0x81bd}, {0x00, 0xdb, 0x819c}, {0x00, 0xdb, 0x81ad}, {0x00, 0xdb, 0x81be}, {0x00, 0xe4, 0x819d}, {0x00, 0xe4, 0x81ae}, {0x00, 0xe4, 0x81bf}, {0x00, 0xed, 0x819e}, {0x00, 0xed, 0x81af}, {0x00, 0xed, 0x81c0}, {0x00, 0xf7, 0x819f}, {0x00, 0xf7, 0x81b0}, {0x00, 0xf7, 0x81c1}, {0x00, 0xff, 0x81a0}, {0x00, 0xff, 0x81b1}, {0x00, 0xff, 0x81c2}, {0x00, 0x03, 0x8156}, {0x00, 0x00, 0x8211}, {0x00, 0x20, 0x8168}, {0x00, 0x01, 0x8202}, {0x00, 0x30, 0x8101}, {0x00, 0x00, 0x8111}, {0x00, 0x00, 0x8112}, {0x00, 0x00, 0x8113}, {0x00, 0x00, 0x8114}, {} }; static const __u8 qtable_creative_pccam[2][64] = { { /* Q-table Y-components */ 0x05, 0x03, 0x03, 0x05, 0x07, 0x0c, 0x0f, 0x12, 0x04, 0x04, 0x04, 0x06, 0x08, 0x11, 0x12, 0x11, 0x04, 0x04, 0x05, 0x07, 0x0c, 0x11, 0x15, 0x11, 0x04, 0x05, 0x07, 0x09, 0x0f, 0x1a, 0x18, 0x13, 0x05, 0x07, 0x0b, 0x11, 0x14, 0x21, 0x1f, 0x17, 0x07, 0x0b, 0x11, 0x13, 0x18, 0x1f, 0x22, 0x1c, 0x0f, 0x13, 0x17, 0x1a, 0x1f, 0x24, 0x24, 0x1e, 0x16, 0x1c, 0x1d, 0x1d, 0x22, 0x1e, 0x1f, 0x1e}, { /* Q-table C-components */ 0x05, 0x05, 0x07, 0x0e, 0x1e, 0x1e, 0x1e, 0x1e, 0x05, 0x06, 0x08, 0x14, 0x1e, 0x1e, 0x1e, 0x1e, 0x07, 0x08, 0x11, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x0e, 0x14, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 
0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e, 0x1e} }; static const __u8 qtable_kodak_ez200[2][64] = { { /* Q-table Y-components */ 0x02, 0x01, 0x01, 0x02, 0x02, 0x04, 0x05, 0x06, 0x01, 0x01, 0x01, 0x02, 0x03, 0x06, 0x06, 0x06, 0x01, 0x01, 0x02, 0x02, 0x04, 0x06, 0x07, 0x06, 0x01, 0x02, 0x02, 0x03, 0x05, 0x09, 0x08, 0x06, 0x02, 0x02, 0x04, 0x06, 0x07, 0x0b, 0x0a, 0x08, 0x02, 0x04, 0x06, 0x06, 0x08, 0x0a, 0x0b, 0x09, 0x05, 0x06, 0x08, 0x09, 0x0a, 0x0c, 0x0c, 0x0a, 0x07, 0x09, 0x0a, 0x0a, 0x0b, 0x0a, 0x0a, 0x0a}, { /* Q-table C-components */ 0x02, 0x02, 0x02, 0x05, 0x0a, 0x0a, 0x0a, 0x0a, 0x02, 0x02, 0x03, 0x07, 0x0a, 0x0a, 0x0a, 0x0a, 0x02, 0x03, 0x06, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x05, 0x07, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a} }; static const __u8 qtable_pocketdv[2][64] = { { /* Q-table Y-components start registers 0x8800 */ 0x06, 0x04, 0x04, 0x06, 0x0a, 0x10, 0x14, 0x18, 0x05, 0x05, 0x06, 0x08, 0x0a, 0x17, 0x18, 0x16, 0x06, 0x05, 0x06, 0x0a, 0x10, 0x17, 0x1c, 0x16, 0x06, 0x07, 0x09, 0x0c, 0x14, 0x23, 0x20, 0x19, 0x07, 0x09, 0x0f, 0x16, 0x1b, 0x2c, 0x29, 0x1f, 0x0a, 0x0e, 0x16, 0x1a, 0x20, 0x2a, 0x2d, 0x25, 0x14, 0x1a, 0x1f, 0x23, 0x29, 0x30, 0x30, 0x28, 0x1d, 0x25, 0x26, 0x27, 0x2d, 0x28, 0x29, 0x28, }, { /* Q-table C-components start registers 0x8840 */ 0x07, 0x07, 0x0a, 0x13, 0x28, 0x28, 0x28, 0x28, 0x07, 0x08, 0x0a, 0x1a, 0x28, 0x28, 0x28, 0x28, 0x0a, 0x0a, 0x16, 0x28, 0x28, 0x28, 0x28, 0x28, 0x13, 0x1a, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28} }; /* read 'len' bytes to gspca_dev->usb_buf */ static void 
reg_r(struct gspca_dev *gspca_dev, __u16 index, __u16 length) { usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, length, 500); } static int reg_w(struct gspca_dev *gspca_dev, __u16 req, __u16 index, __u16 value) { int ret; PDEBUG(D_USBO, "reg write: [0x%02x] = 0x%02x", index, value); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); if (ret < 0) pr_err("reg write: error %d\n", ret); return ret; } /* returns: negative is error, pos or zero is data */ static int reg_r_12(struct gspca_dev *gspca_dev, __u16 req, /* bRequest */ __u16 index, /* wIndex */ __u16 length) /* wLength (1 or 2 only) */ { int ret; gspca_dev->usb_buf[1] = 0; ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, length, 500); /* timeout */ if (ret < 0) { pr_err("reg_r_12 err %d\n", ret); return ret; } return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]; } /* * Simple function to wait for a given 8-bit value to be returned from * a reg_read call. * Returns: negative is error or timeout, zero is success. 
*/ static int reg_r_wait(struct gspca_dev *gspca_dev, __u16 reg, __u16 index, __u16 value) { int ret, cnt = 20; while (--cnt > 0) { ret = reg_r_12(gspca_dev, reg, index, 1); if (ret == value) return 0; msleep(50); } return -EIO; } static int write_vector(struct gspca_dev *gspca_dev, const __u16 data[][3]) { int ret, i = 0; while (data[i][0] != 0 || data[i][1] != 0 || data[i][2] != 0) { ret = reg_w(gspca_dev, data[i][0], data[i][2], data[i][1]); if (ret < 0) return ret; i++; } return 0; } static int spca50x_setup_qtable(struct gspca_dev *gspca_dev, unsigned int request, unsigned int ybase, unsigned int cbase, const __u8 qtable[2][64]) { int i, err; /* loop over y components */ for (i = 0; i < 64; i++) { err = reg_w(gspca_dev, request, ybase + i, qtable[0][i]); if (err < 0) return err; } /* loop over c components */ for (i = 0; i < 64; i++) { err = reg_w(gspca_dev, request, cbase + i, qtable[1][i]); if (err < 0) return err; } return 0; } static void spca500_ping310(struct gspca_dev *gspca_dev) { reg_r(gspca_dev, 0x0d04, 2); PDEBUG(D_STREAM, "ClickSmart310 ping 0x0d04 0x%02x 0x%02x", gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]); } static void spca500_clksmart310_init(struct gspca_dev *gspca_dev) { reg_r(gspca_dev, 0x0d05, 2); PDEBUG(D_STREAM, "ClickSmart310 init 0x0d05 0x%02x 0x%02x", gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]); reg_w(gspca_dev, 0x00, 0x8167, 0x5a); spca500_ping310(gspca_dev); reg_w(gspca_dev, 0x00, 0x8168, 0x22); reg_w(gspca_dev, 0x00, 0x816a, 0xc0); reg_w(gspca_dev, 0x00, 0x816b, 0x0b); reg_w(gspca_dev, 0x00, 0x8169, 0x25); reg_w(gspca_dev, 0x00, 0x8157, 0x5b); reg_w(gspca_dev, 0x00, 0x8158, 0x5b); reg_w(gspca_dev, 0x00, 0x813f, 0x03); reg_w(gspca_dev, 0x00, 0x8151, 0x4a); reg_w(gspca_dev, 0x00, 0x8153, 0x78); reg_w(gspca_dev, 0x00, 0x0d01, 0x04); /* 00 for adjust shutter */ reg_w(gspca_dev, 0x00, 0x0d02, 0x01); reg_w(gspca_dev, 0x00, 0x8169, 0x25); reg_w(gspca_dev, 0x00, 0x0d01, 0x02); } static void spca500_setmode(struct gspca_dev *gspca_dev, 
__u8 xmult, __u8 ymult)
{
	int mode;

	/* set x multiplier */
	reg_w(gspca_dev, 0, 0x8001, xmult);

	/* set y multiplier */
	reg_w(gspca_dev, 0, 0x8002, ymult);

	/* use compressed mode, VGA, with mode specific subsample */
	mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
	reg_w(gspca_dev, 0, 0x8003, mode << 4);
}

/* Full bridge reset: issue the reset command and poll register 0x06
 * (via reg_r_wait) until the chip reports it has come back up. */
static int spca500_full_reset(struct gspca_dev *gspca_dev)
{
	int err;

	/* send the reset command */
	err = reg_w(gspca_dev, 0xe0, 0x0001, 0x0000);
	if (err < 0)
		return err;

	/* wait for the reset to complete */
	err = reg_r_wait(gspca_dev, 0x06, 0x0000, 0x0000);
	if (err < 0)
		return err;
	err = reg_w(gspca_dev, 0xe0, 0x0000, 0x0000);
	if (err < 0)
		return err;
	err = reg_r_wait(gspca_dev, 0x06, 0, 0);
	if (err < 0) {
		PDEBUG(D_ERR, "reg_r_wait() failed");
		return err;
	}
	/* all ok */
	return 0;
}

/* Synchro the Bridge with sensor */
/* Maybe that will work on all spca500 chip */
/* because i only own a clicksmart310 try for that chip */
/* using spca50x_set_packet_size() cause an Ooops here */
/* usb_set_interface from kernel 2.6.x clear all the urb stuff */
/* up-port the same feature as in 2.4.x kernel */
static int spca500_synch310(struct gspca_dev *gspca_dev)
{
	if (usb_set_interface(gspca_dev->dev, gspca_dev->iface, 0) < 0) {
		PDEBUG(D_ERR, "Set packet size: set interface error");
		goto error;
	}
	spca500_ping310(gspca_dev);

	reg_r(gspca_dev, 0x0d00, 1);

	/* need alt setting here */
	PDEBUG(D_PACK, "ClickSmart310 sync alt: %d", gspca_dev->alt);

	/* Windoze use pipe with altsetting 6 why 7 here */
	if (usb_set_interface(gspca_dev->dev,
				gspca_dev->iface,
				gspca_dev->alt) < 0) {
		PDEBUG(D_ERR, "Set packet size: set interface error");
		goto error;
	}
	return 0;
error:
	return -EBUSY;
}

/* Re-initialise the bridge for the Aiptek PocketDV / FamilyCam 300
 * style devices: qtable upload, SDRAM init, then switch to video
 * camera mode.  Errors are only logged (void return). */
static void spca500_reinit(struct gspca_dev *gspca_dev)
{
	int err;
	__u8 Data;

	/* some unknown command from Aiptek pocket dv and family300 */
	reg_w(gspca_dev, 0x00, 0x0d01, 0x01);
	reg_w(gspca_dev, 0x00, 0x0d03, 0x00);
	reg_w(gspca_dev, 0x00, 0x0d02, 0x01);

	/* enable drop packet */
	reg_w(gspca_dev, 0x00, 0x850a, 0x0001);

	err = spca50x_setup_qtable(gspca_dev, 0x00, 0x8800, 0x8840,
				 qtable_pocketdv);
	if (err < 0)
		PDEBUG(D_ERR|D_STREAM, "spca50x_setup_qtable failed on init");

	/* set qtable index */
	reg_w(gspca_dev, 0x00, 0x8880, 2);
	/* family cam Quicksmart stuff */
	reg_w(gspca_dev, 0x00, 0x800a, 0x00);
	/* Set agc transfer: synced between frames */
	reg_w(gspca_dev, 0x00, 0x820f, 0x01);
	/* Init SDRAM - needed for SDRAM access */
	reg_w(gspca_dev, 0x00, 0x870a, 0x04);
	/*Start init sequence or stream */
	reg_w(gspca_dev, 0, 0x8003, 0x00);
	/* switch to video camera mode */
	reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
	msleep(2000);
	if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0) {
		reg_r(gspca_dev, 0x816b, 1);
		Data = gspca_dev->usb_buf[0];
		reg_w(gspca_dev, 0x00, 0x816b, Data);
	}
}

/* this function is called at probe time
 * Select the frame-size table (VGA vs SIF for the ClickSmart 310)
 * and seed the default control values. */
static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam;

	cam = &gspca_dev->cam;
	sd->subtype = id->driver_info;
	if (sd->subtype != LogitechClickSmart310) {
		cam->cam_mode = vga_mode;
		cam->nmodes = ARRAY_SIZE(vga_mode);
	} else {
		cam->cam_mode = sif_mode;
		cam->nmodes = ARRAY_SIZE(sif_mode);
	}
	sd->brightness = BRIGHTNESS_DEF;
	sd->contrast = CONTRAST_DEF;
	sd->colors = COLOR_DEF;
	sd->quality = QUALITY_DEF;
	return 0;
}

/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* initialisation of spca500 based cameras is deferred */
	PDEBUG(D_STREAM, "SPCA500 init");
	if (sd->subtype == LogitechClickSmart310)
		spca500_clksmart310_init(gspca_dev);
/*	else
		spca500_initialise(gspca_dev); */
	PDEBUG(D_STREAM, "SPCA500 init done");
	return 0;
}

/* Start streaming: build the JPEG header, then run the per-subtype
 * bring-up sequence (qtable upload, drop-packet enable, SDRAM init,
 * switch to video camera mode). */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int err;
	__u8 Data;
	__u8 xmult, ymult;

	/* create the JPEG header */
	jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
			0x22);		/* JPEG 411 */
	jpeg_set_qual(sd->jpeg_hdr, sd->quality);

	if (sd->subtype == LogitechClickSmart310) {
		xmult = 0x16;
		ymult = 0x12;
	} else {
		xmult = 0x28;
		ymult = 0x1e;
	}

	/* is there a sensor here ? */
	reg_r(gspca_dev, 0x8a04, 1);
	PDEBUG(D_STREAM, "Spca500 Sensor Address 0x%02x",
		gspca_dev->usb_buf[0]);
	PDEBUG(D_STREAM, "Spca500 curr_mode: %d Xmult: 0x%02x, Ymult: 0x%02x",
		gspca_dev->curr_mode, xmult, ymult);

	/* setup qtable */
	switch (sd->subtype) {
	case LogitechClickSmart310:
		 spca500_setmode(gspca_dev, xmult, ymult);

		/* enable drop packet */
		reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
		reg_w(gspca_dev, 0x00, 0x8880, 3);
		err = spca50x_setup_qtable(gspca_dev,
					   0x00, 0x8800, 0x8840,
					   qtable_creative_pccam);
		if (err < 0)
			PDEBUG(D_ERR, "spca50x_setup_qtable failed");
		/* Init SDRAM - needed for SDRAM access */
		reg_w(gspca_dev, 0x00, 0x870a, 0x04);

		/* switch to video camera mode */
		reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
		msleep(500);
		if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0)
			PDEBUG(D_ERR, "reg_r_wait() failed");

		reg_r(gspca_dev, 0x816b, 1);
		Data = gspca_dev->usb_buf[0];
		reg_w(gspca_dev, 0x00, 0x816b, Data);

		spca500_synch310(gspca_dev);

		write_vector(gspca_dev, spca500_visual_defaults);
		spca500_setmode(gspca_dev, xmult, ymult);
		/* enable drop packet */
		err = reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
		if (err < 0)
			PDEBUG(D_ERR, "failed to enable drop packet");
		reg_w(gspca_dev, 0x00, 0x8880, 3);
		err = spca50x_setup_qtable(gspca_dev,
					   0x00, 0x8800, 0x8840,
					   qtable_creative_pccam);
		if (err < 0)
			PDEBUG(D_ERR, "spca50x_setup_qtable failed");

		/* Init SDRAM - needed for SDRAM access */
		reg_w(gspca_dev, 0x00, 0x870a, 0x04);

		/* switch to video camera mode */
		reg_w(gspca_dev, 0x00, 0x8000, 0x0004);

		if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0)
			PDEBUG(D_ERR, "reg_r_wait() failed");

		reg_r(gspca_dev, 0x816b, 1);
		Data = gspca_dev->usb_buf[0];
		reg_w(gspca_dev, 0x00, 0x816b, Data);
		break;
	case CreativePCCam300:		/* Creative PC-CAM 300 640x480 CCD */
	case IntelPocketPCCamera:	/* FIXME: Temporary fix for
					 *	Intel Pocket PC Camera
					 *	- NWG (Sat 29th March 2003) */

		/* do a full reset */
		err = spca500_full_reset(gspca_dev);
		if (err < 0)
			PDEBUG(D_ERR, "spca500_full_reset failed");

		/* enable drop packet */
		err = reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
		if (err < 0)
			PDEBUG(D_ERR, "failed to enable drop packet");
		reg_w(gspca_dev, 0x00, 0x8880, 3);
		err = spca50x_setup_qtable(gspca_dev,
					   0x00, 0x8800, 0x8840,
					   qtable_creative_pccam);
		if (err < 0)
			PDEBUG(D_ERR, "spca50x_setup_qtable failed");
		spca500_setmode(gspca_dev, xmult, ymult);
		reg_w(gspca_dev, 0x20, 0x0001, 0x0004);

		/* switch to video camera mode */
		reg_w(gspca_dev, 0x00, 0x8000, 0x0004);

		if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0)
			PDEBUG(D_ERR, "reg_r_wait() failed");

		reg_r(gspca_dev, 0x816b, 1);
		Data = gspca_dev->usb_buf[0];
		reg_w(gspca_dev, 0x00, 0x816b, Data);

/*		write_vector(gspca_dev, spca500_visual_defaults); */
		break;
	case KodakEZ200:		/* Kodak EZ200 */

		/* do a full reset */
		err = spca500_full_reset(gspca_dev);
		if (err < 0)
			PDEBUG(D_ERR, "spca500_full_reset failed");
		/* enable drop packet */
		reg_w(gspca_dev, 0x00, 0x850a, 0x0001);
		reg_w(gspca_dev, 0x00, 0x8880, 0);
		err = spca50x_setup_qtable(gspca_dev,
					   0x00, 0x8800, 0x8840,
					   qtable_kodak_ez200);
		if (err < 0)
			PDEBUG(D_ERR, "spca50x_setup_qtable failed");
		spca500_setmode(gspca_dev, xmult, ymult);

		reg_w(gspca_dev, 0x20, 0x0001, 0x0004);

		/* switch to video camera mode */
		reg_w(gspca_dev, 0x00, 0x8000, 0x0004);

		if (reg_r_wait(gspca_dev, 0, 0x8000, 0x44) != 0)
			PDEBUG(D_ERR, "reg_r_wait() failed");

		reg_r(gspca_dev, 0x816b, 1);
		Data = gspca_dev->usb_buf[0];
		reg_w(gspca_dev, 0x00, 0x816b, Data);

/*		write_vector(gspca_dev, spca500_visual_defaults); */
		break;

	case BenqDC1016:
	case DLinkDSC350:		/* FamilyCam 300 */
	case AiptekPocketDV:		/* Aiptek PocketDV */
	case Gsmartmini:		/*Mustek Gsmart Mini */
	case MustekGsmart300:		/* Mustek Gsmart 300 */
	case PalmPixDC85:
	case Optimedia:
	case ToptroIndus:
	case AgfaCl20:
		spca500_reinit(gspca_dev);
		reg_w(gspca_dev, 0x00, 0x0d01, 0x01);
		/* enable drop packet */
		reg_w(gspca_dev, 0x00, 0x850a, 0x0001);

		err = spca50x_setup_qtable(gspca_dev,
					   0x00, 0x8800, 0x8840,
					   qtable_pocketdv);
		if (err < 0)
			PDEBUG(D_ERR, "spca50x_setup_qtable failed");
		reg_w(gspca_dev, 0x00, 0x8880, 2);

		/* familycam Quicksmart pocketDV stuff */
		reg_w(gspca_dev, 0x00, 0x800a, 0x00);
		/* Set agc transfer: synced between frames */
		reg_w(gspca_dev, 0x00, 0x820f, 0x01);
		/* Init SDRAM - needed for SDRAM access */
		reg_w(gspca_dev, 0x00, 0x870a, 0x04);

		spca500_setmode(gspca_dev, xmult, ymult);
		/* switch to video camera mode */
		reg_w(gspca_dev, 0x00, 0x8000, 0x0004);

		reg_r_wait(gspca_dev, 0, 0x8000, 0x44);

		reg_r(gspca_dev, 0x816b, 1);
		Data = gspca_dev->usb_buf[0];
		reg_w(gspca_dev, 0x00, 0x816b, Data);
		break;
	case LogitechTraveler:
	case LogitechClickSmart510:
		reg_w(gspca_dev, 0x02, 0x00, 0x00);
		/* enable drop packet */
		reg_w(gspca_dev, 0x00, 0x850a, 0x0001);

		err = spca50x_setup_qtable(gspca_dev,
					0x00, 0x8800,
					0x8840, qtable_creative_pccam);
		if (err < 0)
			PDEBUG(D_ERR, "spca50x_setup_qtable failed");
		reg_w(gspca_dev, 0x00, 0x8880, 3);
		reg_w(gspca_dev, 0x00, 0x800a, 0x00);
		/* Init SDRAM - needed for SDRAM access */
		reg_w(gspca_dev, 0x00, 0x870a, 0x04);

		spca500_setmode(gspca_dev, xmult, ymult);

		/* switch to video camera mode */
		reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
		reg_r_wait(gspca_dev, 0, 0x8000, 0x44);

		reg_r(gspca_dev, 0x816b, 1);
		Data = gspca_dev->usb_buf[0];
		reg_w(gspca_dev, 0x00, 0x816b, Data);
		write_vector(gspca_dev, Clicksmart510_defaults);
		break;
	}
	return 0;
}

/* Stop streaming: drop back out of video camera mode and log the
 * final state of register 0x8000. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	reg_w(gspca_dev, 0, 0x8003, 0x00);

	/* switch to video camera mode */
	reg_w(gspca_dev, 0x00, 0x8000, 0x0004);
	reg_r(gspca_dev, 0x8000, 1);
	PDEBUG(D_STREAM, "stop SPCA500 done reg8000: 0x%2x",
		gspca_dev->usb_buf[0]);
}

/* Demux the isochronous stream: frames are JPEG 4.1.1 without the
 * 0xff byte-stuffing, so re-insert a 0x00 after every 0xff while
 * copying the payload into the current frame. */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,			/* isoc packet */
			int len)			/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;
	int i;
	static __u8 ffd9[] = {0xff, 0xd9};

/* frames are jpeg 4.1.1 without 0xff escape */
	if (data[0] == 0xff) {
		if (data[1] != 0x01) {	/* drop packet */
/*			gspca_dev->last_packet_type = DISCARD_PACKET; */
			return;
		}
		gspca_frame_add(gspca_dev, LAST_PACKET,
				ffd9, 2);

		/* put the JPEG header in the new frame */
		gspca_frame_add(gspca_dev, FIRST_PACKET,
			sd->jpeg_hdr, JPEG_HDR_SZ);

		data += SPCA500_OFFSET_DATA;
		len -= SPCA500_OFFSET_DATA;
	} else {
		data += 1;
		len -= 1;
	}

	/* add 0x00 after 0xff */
	i = 0;
	do {
		if (data[i] == 0xff) {
			gspca_frame_add(gspca_dev, INTER_PACKET,
					data, i + 1);
			len -= i;
			data += i;
			*data = 0x00;
			i = 0;
		}
		i++;
	} while (i < len);
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}

/* Write the cached brightness value (biased by -128) to reg 0x8167. */
static void setbrightness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_w(gspca_dev, 0x00, 0x8167,
			(__u8) (sd->brightness - 128));
}

/* Write the cached contrast value to reg 0x8168. */
static void setcontrast(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_w(gspca_dev, 0x00, 0x8168, sd->contrast);
}

/* Write the cached color (saturation) value to reg 0x8169. */
static void setcolors(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	reg_w(gspca_dev, 0x00, 0x8169, sd->colors);
}

/* V4L2 control: cache new brightness; push it if streaming. */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->brightness = val;
	if (gspca_dev->streaming)
		setbrightness(gspca_dev);
	return 0;
}

/* V4L2 control: report the cached brightness. */
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->brightness;
	return 0;
}

/* V4L2 control: cache new contrast; push it if streaming. */
static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->contrast = val;
	if (gspca_dev->streaming)
		setcontrast(gspca_dev);
	return 0;
}

/* V4L2 control: report the cached contrast. */
static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->contrast;
	return 0;
}

/* V4L2 control: cache new color value; push it if streaming. */
static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->colors = val;
	if (gspca_dev->streaming)
		setcolors(gspca_dev);
	return 0;
}

/* V4L2 control: report the cached color value. */
static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	*val = sd->colors;
	return 0;
}

/* VIDIOC_S_JPEGCOMP: clamp requested quality into [QUALITY_MIN,
 * QUALITY_MAX] and re-generate the JPEG header if streaming. */
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
			struct v4l2_jpegcompression *jcomp)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (jcomp->quality < QUALITY_MIN)
		sd->quality = QUALITY_MIN;
	else if (jcomp->quality > QUALITY_MAX)
		sd->quality = QUALITY_MAX;
	else
		sd->quality = jcomp->quality;
	if (gspca_dev->streaming)
		jpeg_set_qual(sd->jpeg_hdr, sd->quality);
	return 0;
}

/* VIDIOC_G_JPEGCOMP: report the current quality and the markers
 * (DHT, DQT) the driver prepends to each frame. */
static int sd_get_jcomp(struct gspca_dev *gspca_dev,
			struct v4l2_jpegcompression *jcomp)
{
	struct sd *sd = (struct sd *) gspca_dev;

	memset(jcomp, 0, sizeof *jcomp);
	jcomp->quality = sd->quality;
	jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
			| V4L2_JPEG_MARKER_DQT;
	return 0;
}

/* sub-driver description */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.ctrls = sd_ctrls,
	.nctrls = ARRAY_SIZE(sd_ctrls),
	.config = sd_config,
	.init = sd_init,
	.start = sd_start,
	.stopN = sd_stopN,
	.pkt_scan = sd_pkt_scan,
	.get_jcomp = sd_get_jcomp,
	.set_jcomp = sd_set_jcomp,
};

/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x040a, 0x0300), .driver_info = KodakEZ200},
	{USB_DEVICE(0x041e, 0x400a), .driver_info = CreativePCCam300},
	{USB_DEVICE(0x046d, 0x0890), .driver_info = LogitechTraveler},
	{USB_DEVICE(0x046d, 0x0900), .driver_info = LogitechClickSmart310},
	{USB_DEVICE(0x046d, 0x0901), .driver_info = LogitechClickSmart510},
	{USB_DEVICE(0x04a5, 0x300c), .driver_info = BenqDC1016},
	{USB_DEVICE(0x04fc, 0x7333), .driver_info = PalmPixDC85},
	{USB_DEVICE(0x055f, 0xc200), .driver_info = MustekGsmart300},
	{USB_DEVICE(0x055f, 0xc220), .driver_info = Gsmartmini},
	{USB_DEVICE(0x06bd, 0x0404), .driver_info = AgfaCl20},
	{USB_DEVICE(0x06be, 0x0800), .driver_info = Optimedia},
	{USB_DEVICE(0x084d, 0x0003), .driver_info = DLinkDSC350},
	{USB_DEVICE(0x08ca, 0x0103), .driver_info = AiptekPocketDV},
	{USB_DEVICE(0x2899, 0x012c), .driver_info = ToptroIndus},
	{USB_DEVICE(0x8086, 0x0630), .driver_info = IntelPocketPCCamera},
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);

/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
				THIS_MODULE);
}

static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
gpl-2.0
tank0412/android_kernel_ONDA_kylin_mb976a9
drivers/media/video/gspca/stv0680.c
4956
10287
/*
 * STV0680 USB Camera Driver
 *
 * Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com>
 *
 * This module is adapted from the in kernel v4l1 stv680 driver:
 *
 *  STV0680 USB Camera Driver, by Kevin Sisson (kjsisson@bellsouth.net)
 *
 * Thanks to STMicroelectronics for information on the usb commands, and
 * to Steve Miller at STM for his help and encouragement while I was
 * writing this driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define MODULE_NAME "stv0680"

#include "gspca.h"

MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("STV0680 USB Camera Driver");
MODULE_LICENSE("GPL");

/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;		/* !! must be the first item */
	struct v4l2_pix_format mode;		/* the single mode the cam reports */
	u8 orig_mode;				/* video mode found at probe time */
	u8 video_mode;				/* mode to use while streaming */
	u8 current_mode;			/* mode currently set on the cam */
};

/* V4L2 controls supported by the driver */
static const struct ctrl sd_ctrls[] = {
};

/* Issue a vendor control transfer; 'set' selects one of four
 * direction/recipient combinations.  Returns usb_control_msg()'s
 * result (bytes transferred, or negative error). */
static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req,
		       u16 val, int size)
{
	int ret = -1;
	u8 req_type = 0;
	unsigned int pipe = 0;

	switch (set) {
	case 0: /*  0xc1  */
		req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
		pipe = usb_rcvctrlpipe(gspca_dev->dev, 0);
		break;
	case 1: /*  0x41  */
		req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
		pipe = usb_sndctrlpipe(gspca_dev->dev, 0);
		break;
	case 2:	/*  0x80  */
		req_type = USB_DIR_IN | USB_RECIP_DEVICE;
		pipe = usb_rcvctrlpipe(gspca_dev->dev, 0);
		break;
	case 3:	/*  0x40  */
		req_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
		pipe = usb_sndctrlpipe(gspca_dev->dev, 0);
		break;
	}

	ret = usb_control_msg(gspca_dev->dev, pipe,
			      req, req_type,
			      val, 0, gspca_dev->usb_buf, size, 500);

	if ((ret < 0) && (req != 0x0a))
		pr_err("usb_control_msg error %i, request = 0x%x, error = %i\n",
		       set, req, ret);

	return ret;
}

/* Query the camera's "last error" register for diagnostics and pass
 * through the caller-supplied error code. */
static int stv0680_handle_error(struct gspca_dev *gspca_dev, int ret)
{
	stv_sndctrl(gspca_dev, 0, 0x80, 0, 0x02);	/* Get Last Error */
	PDEBUG(D_ERR, "last error: %i,  command = 0x%x",
	       gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
	return ret;
}

/* Read the current video mode: 01 = VGA, 03 = QVGA, 00 = CIF. */
static int stv0680_get_video_mode(struct gspca_dev *gspca_dev)
{
	/* Note not sure if this init of usb_buf is really necessary */
	memset(gspca_dev->usb_buf, 0, 8);
	gspca_dev->usb_buf[0] = 0x0f;

	if (stv_sndctrl(gspca_dev, 0, 0x87, 0, 0x08) != 0x08) {
		PDEBUG(D_ERR, "Get_Camera_Mode failed");
		return stv0680_handle_error(gspca_dev, -EIO);
	}

	return gspca_dev->usb_buf[0]; /* 01 = VGA, 03 = QVGA, 00 = CIF */
}

/* Set the video mode, then read it back to verify the camera really
 * accepted it.  No-op when the mode is already current. */
static int stv0680_set_video_mode(struct gspca_dev *gspca_dev, u8 mode)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->current_mode == mode)
		return 0;

	memset(gspca_dev->usb_buf, 0, 8);
	gspca_dev->usb_buf[0] = mode;

	if (stv_sndctrl(gspca_dev, 3, 0x07, 0x0100, 0x08) != 0x08) {
		PDEBUG(D_ERR, "Set_Camera_Mode failed");
		return stv0680_handle_error(gspca_dev, -EIO);
	}

	/* Verify we got what we've asked for */
	if (stv0680_get_video_mode(gspca_dev) != mode) {
		PDEBUG(D_ERR, "Error setting camera video mode!");
		return -EIO;
	}

	sd->current_mode = mode;

	return 0;
}

/* this function is called at probe time
 * Ping the chip, read its descriptors and capabilities, pick CIF or
 * QVGA, and derive the single supported v4l2_pix_format from the
 * mode details the camera reports. */
static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	int ret;
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam = &gspca_dev->cam;

	/* Give the camera some time to settle, otherwise initalization will
	   fail on hotplug, and yes it really needs a full second. */
	msleep(1000);

	/* ping camera to be sure STV0680 is present */
	if (stv_sndctrl(gspca_dev, 0, 0x88, 0x5678, 0x02) != 0x02 ||
	    gspca_dev->usb_buf[0] != 0x56 || gspca_dev->usb_buf[1] != 0x78) {
		PDEBUG(D_ERR, "STV(e): camera ping failed!!");
		return stv0680_handle_error(gspca_dev, -ENODEV);
	}

	/* get camera descriptor */
	if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0200, 0x09) != 0x09)
		return stv0680_handle_error(gspca_dev, -ENODEV);

	if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0200, 0x22) != 0x22 ||
	    gspca_dev->usb_buf[7] != 0xa0 || gspca_dev->usb_buf[8] != 0x23) {
		PDEBUG(D_ERR, "Could not get descriptor 0200.");
		return stv0680_handle_error(gspca_dev, -ENODEV);
	}
	if (stv_sndctrl(gspca_dev, 0, 0x8a, 0, 0x02) != 0x02)
		return stv0680_handle_error(gspca_dev, -ENODEV);
	if (stv_sndctrl(gspca_dev, 0, 0x8b, 0, 0x24) != 0x24)
		return stv0680_handle_error(gspca_dev, -ENODEV);
	if (stv_sndctrl(gspca_dev, 0, 0x85, 0, 0x10) != 0x10)
		return stv0680_handle_error(gspca_dev, -ENODEV);

	if (!(gspca_dev->usb_buf[7] & 0x09)) {
		PDEBUG(D_ERR, "Camera supports neither CIF nor QVGA mode");
		return -ENODEV;
	}
	if (gspca_dev->usb_buf[7] & 0x01)
		PDEBUG(D_PROBE, "Camera supports CIF mode");
	if (gspca_dev->usb_buf[7] & 0x02)
		PDEBUG(D_PROBE, "Camera supports VGA mode");
	if (gspca_dev->usb_buf[7] & 0x04)
		PDEBUG(D_PROBE, "Camera supports QCIF mode");
	if (gspca_dev->usb_buf[7] & 0x08)
		PDEBUG(D_PROBE, "Camera supports QVGA mode");

	if (gspca_dev->usb_buf[7] & 0x01)
		sd->video_mode = 0x00; /* CIF */
	else
		sd->video_mode = 0x03; /* QVGA */

	/* FW rev, ASIC rev, sensor ID  */
	PDEBUG(D_PROBE, "Firmware rev is %i.%i",
	       gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
	PDEBUG(D_PROBE, "ASIC rev is %i.%i",
	       gspca_dev->usb_buf[2], gspca_dev->usb_buf[3]);
	PDEBUG(D_PROBE, "Sensor ID is %i",
	       (gspca_dev->usb_buf[4]*16) + (gspca_dev->usb_buf[5]>>4));

	ret = stv0680_get_video_mode(gspca_dev);
	if (ret < 0)
		return ret;
	sd->current_mode = sd->orig_mode = ret;

	ret = stv0680_set_video_mode(gspca_dev, sd->video_mode);
	if (ret < 0)
		return ret;

	/* Get mode details */
	if (stv_sndctrl(gspca_dev, 0, 0x8f, 0, 0x10) != 0x10)
		return stv0680_handle_error(gspca_dev, -EIO);

	cam->bulk = 1;
	cam->bulk_nurbs = 1; /* The cam cannot handle more */
	cam->bulk_size = (gspca_dev->usb_buf[0] << 24) |
			 (gspca_dev->usb_buf[1] << 16) |
			 (gspca_dev->usb_buf[2] << 8) |
			 (gspca_dev->usb_buf[3]);
	sd->mode.width = (gspca_dev->usb_buf[4] << 8) |
			 (gspca_dev->usb_buf[5]);  /* 322, 356, 644 */
	sd->mode.height = (gspca_dev->usb_buf[6] << 8) |
			  (gspca_dev->usb_buf[7]); /* 242, 292, 484 */
	sd->mode.pixelformat = V4L2_PIX_FMT_STV0680;
	sd->mode.field = V4L2_FIELD_NONE;
	sd->mode.bytesperline = sd->mode.width;
	sd->mode.sizeimage = cam->bulk_size;
	sd->mode.colorspace = V4L2_COLORSPACE_SRGB;

	/* origGain = gspca_dev->usb_buf[12]; */

	cam->cam_mode = &sd->mode;
	cam->nmodes = 1;

	ret = stv0680_set_video_mode(gspca_dev, sd->orig_mode);
	if (ret < 0)
		return ret;

	if (stv_sndctrl(gspca_dev, 2, 0x06, 0x0100, 0x12) != 0x12 ||
	    gspca_dev->usb_buf[8] != 0x53 || gspca_dev->usb_buf[9] != 0x05) {
		pr_err("Could not get descriptor 0100\n");
		return stv0680_handle_error(gspca_dev, -EIO);
	}

	return 0;
}

/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
	return 0;
}

/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
	int ret;
	struct sd *sd = (struct sd *) gspca_dev;

	ret = stv0680_set_video_mode(gspca_dev, sd->video_mode);
	if (ret < 0)
		return ret;

	if (stv_sndctrl(gspca_dev, 0, 0x85, 0, 0x10) != 0x10)
		return stv0680_handle_error(gspca_dev, -EIO);

	/* Start stream at:
	   0x0000 = CIF (352x288)
	   0x0100 = VGA (640x480)
	   0x0300 = QVGA (320x240) */
	if (stv_sndctrl(gspca_dev, 1, 0x09, sd->video_mode << 8, 0x0) != 0x0)
		return stv0680_handle_error(gspca_dev, -EIO);

	return 0;
}

/* Stop streaming on the camera side. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	/* This is a high priority command; it stops all lower order cmds */
	if (stv_sndctrl(gspca_dev, 1, 0x04, 0x0000, 0x0) != 0x0)
		stv0680_handle_error(gspca_dev, -EIO);
}

/* Restore the video mode found at probe time (skipped if the device
 * is already gone). */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (!sd->gspca_dev.present)
		return;
	stv0680_set_video_mode(gspca_dev, sd->orig_mode);
}

/* One bulk packet == one frame; runt packets mark a corrupt frame. */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,
			int len)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* Every now and then the camera sends a 16 byte packet, no idea
	   what it contains, but it is not image data, when this
	   happens the frame received before this packet is corrupt,
	   so discard it. */
	if (len != sd->mode.sizeimage) {
		gspca_dev->last_packet_type = DISCARD_PACKET;
		return;
	}

	/* Finish the previous frame, we do this upon reception of the next
	   packet, even though it is already complete so that the strange 16
	   byte packets send after a corrupt frame can discard it. */
	gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);

	/* Store the just received frame */
	gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
}

/* sub-driver description */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.ctrls = sd_ctrls,
	.nctrls = ARRAY_SIZE(sd_ctrls),
	.config = sd_config,
	.init = sd_init,
	.start = sd_start,
	.stopN = sd_stopN,
	.stop0 = sd_stop0,
	.pkt_scan = sd_pkt_scan,
};

/* -- module initialisation -- */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x0553, 0x0202)},
	{USB_DEVICE(0x041e, 0x4007)},
	{}
};
MODULE_DEVICE_TABLE(usb, device_table);

/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
			       THIS_MODULE);
}

static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
#endif
};

module_usb_driver(sd_driver);
gpl-2.0
SteadyQuad/android_kernel_yotaphone2
drivers/media/video/cx88/cx88-blackbird.c
5468
38576
/*
 *
 *  Support for a cx23416 mpeg encoder via cx2388x host port.
 *  "blackbird" reference design.
 *
 *    (c) 2004 Jelle Foks <jelle@foks.us>
 *    (c) 2004 Gerd Knorr <kraxel@bytesex.org>
 *
 *    (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
 *        - video_ioctl2 conversion
 *
 *  Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/cx2341x.h>

#include "cx88.h"

MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);

static unsigned int mpegbufs = 32;
module_param(mpegbufs,int,0644);
MODULE_PARM_DESC(mpegbufs,"number of mpeg buffers, range 2-32");

static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");

#define dprintk(level,fmt, arg...)	if (debug >= level) \
	printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg)

/* ------------------------------------------------------------------ */

#define BLACKBIRD_FIRM_IMAGE_SIZE 376836

/* defines below are from ivtv-driver.h */

#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF

/* Firmware API commands */
#define IVTV_API_STD_TIMEOUT 500

enum blackbird_capture_type {
	BLACKBIRD_MPEG_CAPTURE,
	BLACKBIRD_RAW_CAPTURE,
	BLACKBIRD_RAW_PASSTHRU_CAPTURE
};
enum blackbird_capture_bits {
	BLACKBIRD_RAW_BITS_NONE             = 0x00,
	BLACKBIRD_RAW_BITS_YUV_CAPTURE      = 0x01,
	BLACKBIRD_RAW_BITS_PCM_CAPTURE      = 0x02,
	BLACKBIRD_RAW_BITS_VBI_CAPTURE      = 0x04,
	BLACKBIRD_RAW_BITS_PASSTHRU_CAPTURE = 0x08,
	BLACKBIRD_RAW_BITS_TO_HOST_CAPTURE  = 0x10
};
enum blackbird_capture_end {
	BLACKBIRD_END_AT_GOP, /* stop at the end of gop, generate irq */
	BLACKBIRD_END_NOW, /* stop immediately, no irq */
};
enum blackbird_framerate {
	BLACKBIRD_FRAMERATE_NTSC_30, /* NTSC: 30fps */
	BLACKBIRD_FRAMERATE_PAL_25   /* PAL: 25fps */
};
enum blackbird_stream_port {
	BLACKBIRD_OUTPUT_PORT_MEMORY,
	BLACKBIRD_OUTPUT_PORT_STREAMING,
	BLACKBIRD_OUTPUT_PORT_SERIAL
};
enum blackbird_data_xfer_status {
	BLACKBIRD_MORE_BUFFERS_FOLLOW,
	BLACKBIRD_LAST_BUFFER,
};
enum blackbird_picture_mask {
	BLACKBIRD_PICTURE_MASK_NONE,
	BLACKBIRD_PICTURE_MASK_I_FRAMES,
	BLACKBIRD_PICTURE_MASK_I_P_FRAMES = 0x3,
	BLACKBIRD_PICTURE_MASK_ALL_FRAMES = 0x7,
};
enum blackbird_vbi_mode_bits {
	BLACKBIRD_VBI_BITS_SLICED,
	BLACKBIRD_VBI_BITS_RAW,
};
enum blackbird_vbi_insertion_bits {
	BLACKBIRD_VBI_BITS_INSERT_IN_XTENSION_USR_DATA,
	BLACKBIRD_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1,
	BLACKBIRD_VBI_BITS_SEPARATE_STREAM = 0x2 << 1,
	BLACKBIRD_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1,
	BLACKBIRD_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1,
};
enum blackbird_dma_unit {
	BLACKBIRD_DMA_BYTES,
	BLACKBIRD_DMA_FRAMES,
};
enum blackbird_dma_transfer_status_bits {
	BLACKBIRD_DMA_TRANSFER_BITS_DONE = 0x01,
	BLACKBIRD_DMA_TRANSFER_BITS_ERROR = 0x04,
	BLACKBIRD_DMA_TRANSFER_BITS_LL_ERROR = 0x10,
};
enum blackbird_pause {
	BLACKBIRD_PAUSE_ENCODING,
	BLACKBIRD_RESUME_ENCODING,
};
enum blackbird_copyright {
	BLACKBIRD_COPYRIGHT_OFF,
	BLACKBIRD_COPYRIGHT_ON,
};
enum blackbird_notification_type {
	BLACKBIRD_NOTIFICATION_REFRESH,
};
enum blackbird_notification_status {
	BLACKBIRD_NOTIFICATION_OFF,
	BLACKBIRD_NOTIFICATION_ON,
};
enum blackbird_notification_mailbox {
	BLACKBIRD_NOTIFICATION_NO_MAILBOX = -1,
};
enum blackbird_field1_lines {
	BLACKBIRD_FIELD1_SAA7114 = 0x00EF, /* 239 */
	BLACKBIRD_FIELD1_SAA7115 = 0x00F0, /* 240 */
	BLACKBIRD_FIELD1_MICRONAS = 0x0105, /* 261 */
};
enum blackbird_field2_lines {
	BLACKBIRD_FIELD2_SAA7114 = 0x00EF, /* 239 */
	BLACKBIRD_FIELD2_SAA7115 = 0x00F0, /* 240 */
	BLACKBIRD_FIELD2_MICRONAS = 0x0106, /* 262 */
};
enum blackbird_custom_data_type {
	BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
	BLACKBIRD_CUSTOM_PRIVATE_PACKET,
};
enum blackbird_mute {
	BLACKBIRD_UNMUTE,
	BLACKBIRD_MUTE,
};
enum blackbird_mute_video_mask {
	BLACKBIRD_MUTE_VIDEO_V_MASK = 0x0000FF00,
	BLACKBIRD_MUTE_VIDEO_U_MASK = 0x00FF0000,
	BLACKBIRD_MUTE_VIDEO_Y_MASK = 0xFF000000,
};
enum blackbird_mute_video_shift {
	BLACKBIRD_MUTE_VIDEO_V_SHIFT = 8,
	BLACKBIRD_MUTE_VIDEO_U_SHIFT = 16,
	BLACKBIRD_MUTE_VIDEO_Y_SHIFT = 24,
};

/* Registers */
#define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_SPU (0x9050 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_HW_BLOCKS (0x9054 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_VPU (0x9058 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_APU (0xA064 /*| IVTV_REG_OFFSET*/)

/* ------------------------------------------------------------------ */

/* Reset and configure the cx2388x host port used to talk to the
 * cx23416 encoder. */
static void host_setup(struct cx88_core *core)
{
	/* toggle reset of the host */
	cx_write(MO_GPHST_SOFT_RST, 1);
	udelay(100);
	cx_write(MO_GPHST_SOFT_RST, 0);
	udelay(100);

	/* host port setup */
	cx_write(MO_GPHST_WSC, 0x44444444U);
	cx_write(MO_GPHST_XFR, 0);
	cx_write(MO_GPHST_WDTH, 15);
	cx_write(MO_GPHST_HDSHK, 0);
	cx_write(MO_GPHST_MUX16, 0x44448888U);
	cx_write(MO_GPHST_MODE, 0);
}

/* ------------------------------------------------------------------ */

#define P1_MDATA0 0x390000
#define P1_MDATA1 0x390001
#define P1_MDATA2 0x390002
#define P1_MDATA3 0x390003
#define P1_MADDR2 0x390004
#define P1_MADDR1 0x390005
#define P1_MADDR0 0x390006
#define P1_RDATA0 0x390008
#define P1_RDATA1 0x390009
#define P1_RDATA2 0x39000A
#define P1_RDATA3 0x39000B
#define P1_RADDR0 0x39000C
#define P1_RADDR1 0x39000D
#define P1_RRDWR  0x39000E

/* Poll GPIO0 bit 1 until it matches 'state' or ~1ms has passed;
 * returns 0 on match, -1 on timeout. */
static int wait_ready_gpio0_bit1(struct cx88_core *core, u32 state)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1);
	u32 gpio0,need;

	need = state ? 2 : 0;
	for (;;) {
		gpio0 = cx_read(MO_GP0_IO) & 2;
		if (need == gpio0)
			return 0;
		if (time_after(jiffies,timeout))
			return -1;
		udelay(1);
	}
}

/* Write one dword to encoder memory through the host port. */
static int memory_write(struct cx88_core *core, u32 address, u32 value)
{
	/* Warning: address is dword address (4 bytes) */
	cx_writeb(P1_MDATA0, (unsigned int)value);
	cx_writeb(P1_MDATA1, (unsigned int)(value >> 8));
	cx_writeb(P1_MDATA2, (unsigned int)(value >> 16));
	cx_writeb(P1_MDATA3, (unsigned int)(value >> 24));
	cx_writeb(P1_MADDR2, (unsigned int)(address >> 16) | 0x40);
	cx_writeb(P1_MADDR1, (unsigned int)(address >> 8));
	cx_writeb(P1_MADDR0, (unsigned int)address);
	cx_read(P1_MDATA0);
	cx_read(P1_MADDR0);

	return wait_ready_gpio0_bit1(core,1);
}

/* Read one dword from encoder memory through the host port. */
static int memory_read(struct cx88_core *core, u32 address, u32 *value)
{
	int retval;
	u32 val;

	/* Warning: address is dword address (4 bytes) */
	cx_writeb(P1_MADDR2, (unsigned int)(address >> 16) & ~0xC0);
	cx_writeb(P1_MADDR1, (unsigned int)(address >> 8));
	cx_writeb(P1_MADDR0, (unsigned int)address);
	cx_read(P1_MADDR0);

	retval = wait_ready_gpio0_bit1(core,1);

	cx_writeb(P1_MDATA3, 0);
	val     = (unsigned char)cx_read(P1_MDATA3) << 24;
	cx_writeb(P1_MDATA2, 0);
	val    |= (unsigned char)cx_read(P1_MDATA2) << 16;
	cx_writeb(P1_MDATA1, 0);
	val    |= (unsigned char)cx_read(P1_MDATA1) << 8;
	cx_writeb(P1_MDATA0, 0);
	val    |= (unsigned char)cx_read(P1_MDATA0);

	*value  = val;
	return retval;
}

/* Write one dword to an encoder register through the host port. */
static int register_write(struct cx88_core *core, u32 address, u32 value)
{
	cx_writeb(P1_RDATA0, (unsigned int)value);
	cx_writeb(P1_RDATA1, (unsigned int)(value >> 8));
	cx_writeb(P1_RDATA2, (unsigned int)(value >> 16));
	cx_writeb(P1_RDATA3, (unsigned int)(value >> 24));
	cx_writeb(P1_RADDR0, (unsigned int)address);
	cx_writeb(P1_RADDR1, (unsigned int)(address >> 8));
	cx_writeb(P1_RRDWR, 1);
	cx_read(P1_RDATA0);
	cx_read(P1_RADDR0);

	return wait_ready_gpio0_bit1(core,1);
}

/* Read one dword from an encoder register through the host port. */
static int register_read(struct cx88_core *core, u32 address, u32 *value)
{
	int retval;
	u32 val;

	cx_writeb(P1_RADDR0, (unsigned int)address);
	cx_writeb(P1_RADDR1, (unsigned int)(address >> 8));
	cx_writeb(P1_RRDWR, 0);
	cx_read(P1_RADDR0);

	retval  = wait_ready_gpio0_bit1(core,1);
	val     = (unsigned char)cx_read(P1_RDATA0);
	val    |= (unsigned char)cx_read(P1_RDATA1) << 8;
	val    |= (unsigned char)cx_read(P1_RDATA2) << 16;
	val    |= (unsigned char)cx_read(P1_RDATA3) << 24;

	*value  = val;
	return retval;
}

/* ------------------------------------------------------------------ */

/* Run one firmware API command through the shared-memory mailbox:
 * claim the mailbox, write command + args, poll for completion,
 * read back results, then release the mailbox. */
static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx8802_dev *dev = priv;
	unsigned long timeout;
	u32 value, flag, retval;
	int i;

	dprintk(1,"%s: 0x%X\n", __func__, command);

	/* this may not be 100% safe if we can't read any memory location
	   without side effects */
	memory_read(dev->core, dev->mailbox - 4, &value);
	if (value != 0x12345678) {
		dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n");
		return -1;
	}

	memory_read(dev->core, dev->mailbox, &flag);
	if (flag) {
		dprintk(0, "ERROR: Mailbox appears to be in use (%x)\n", flag);
		return -1;
	}

	flag |= 1; /* tell 'em we're working on it */
	memory_write(dev->core, dev->mailbox, flag);

	/* write command + args + fill remaining with zeros */
	memory_write(dev->core, dev->mailbox + 1, command); /* command code */
	memory_write(dev->core, dev->mailbox + 3, IVTV_API_STD_TIMEOUT); /* timeout */
	for (i = 0; i < in; i++) {
		memory_write(dev->core, dev->mailbox + 4 + i, data[i]);
		dprintk(1, "API Input %d = %d\n", i, data[i]);
	}
	for (; i < CX2341X_MBOX_MAX_DATA; i++)
		memory_write(dev->core, dev->mailbox + 4 + i, 0);

	flag |= 3; /* tell 'em we're done writing */
	memory_write(dev->core, dev->mailbox, flag);

	/* wait for firmware to handle the API command */
	timeout = jiffies + msecs_to_jiffies(10);
	for (;;) {
		memory_read(dev->core, dev->mailbox, &flag);
		if (0 != (flag & 4))
			break;
		if (time_after(jiffies,timeout)) {
			dprintk(0, "ERROR: API Mailbox timeout\n");
			return -1;
		}
		udelay(10);
	}

	/* read output values */
	for (i = 0; i < out; i++) {
		memory_read(dev->core, dev->mailbox + 4 + i, data + i);
		dprintk(1, "API Output %d = %d\n", i, data[i]);
	}

	memory_read(dev->core, dev->mailbox + 2, &retval);
	dprintk(1, "API result = %d\n",retval);

	flag = 0;
	memory_write(dev->core, dev->mailbox, flag);
	return retval;
}

/* ------------------------------------------------------------------ */

/* We don't need to call the API often, so using just one mailbox will probably suffice */
static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
			     u32 inputcnt, u32 outputcnt, ...)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	va_list vargs;
	int i, err;

	va_start(vargs, outputcnt);

	for (i = 0; i < inputcnt; i++) {
		data[i] = va_arg(vargs, int);
	}
	err = blackbird_mbox_func(dev, command, inputcnt, outputcnt, data);
	for (i = 0; i < outputcnt; i++) {
		int *vptr = va_arg(vargs, int *);
		*vptr = data[i];
	}
	va_end(vargs);
	return err;
}

/* Scan encoder memory for the 4-dword mailbox signature; returns
 * the dword address just past it, or -1 when not found. */
static int blackbird_find_mailbox(struct cx8802_dev *dev)
{
	u32 signature[4]={0x12345678, 0x34567812, 0x56781234, 0x78123456};
	int signaturecnt=0;
	u32 value;
	int i;

	for (i = 0; i < BLACKBIRD_FIRM_IMAGE_SIZE; i++) {
		memory_read(dev->core, i, &value);
		if (value == signature[signaturecnt])
			signaturecnt++;
		else
			signaturecnt = 0;
		if (4 == signaturecnt) {
			dprintk(1, "Mailbox signature found\n");
			return i+1;
		}
	}
	dprintk(0, "Mailbox signature values not found!\n");
	return -1;
}

/* Upload the cx23416 encoder firmware: reset the encoder, request the
 * firmware image, validate its size and magic, write it to encoder
 * memory and verify it via an additive checksum read-back. */
static int blackbird_load_firmware(struct cx8802_dev *dev)
{
	static const unsigned char magic[8] = {
		0xa7, 0x0d, 0x00, 0x00, 0x66, 0xbb, 0x55, 0xaa
	};
	const struct firmware *firmware;
	int i, retval = 0;
	u32 value = 0;
	u32 checksum = 0;
	u32 *dataptr;

	retval  = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED);
	retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
	retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH, 0x80000640);
	retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A);
	msleep(1);
	retval |= register_write(dev->core, IVTV_REG_APU, 0);

	if (retval < 0)
		dprintk(0, "Error with register_write\n");

	retval = request_firmware(&firmware, CX2341X_FIRM_ENC_FILENAME,
				  &dev->pci->dev);

	if (retval != 0) {
		dprintk(0, "ERROR: Hotplug firmware request failed (%s).\n",
			CX2341X_FIRM_ENC_FILENAME);
		dprintk(0, "Please fix your hotplug setup, the board will "
			"not work without firmware loaded!\n");
		return -1;
	}

	if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
		dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n",
			firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
		release_firmware(firmware);
		return -1;
	}

	if (0 != memcmp(firmware->data, magic, 8)) {
		dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n");
		release_firmware(firmware);
		return -1;
	}

	/* transfer to the chip */
	dprintk(1,"Loading firmware ...\n");
	dataptr = (u32*)firmware->data;
	for (i = 0; i < (firmware->size >> 2); i++) {
		value = *dataptr;
		checksum += ~value;
		memory_write(dev->core, i, value);
		dataptr++;
	}

	/* read back to verify with the checksum */
	for (i--; i >= 0; i--) {
		memory_read(dev->core, i, &value);
		checksum -= ~value;
	}
	if (checksum) {
		dprintk(0, "ERROR: Firmware load failed (checksum mismatch).\n");
		release_firmware(firmware);
		return -1;
	}
	release_firmware(firmware);
	dprintk(0, "Firmware upload successful.\n");

	retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
	retval |= register_read(dev->core, IVTV_REG_SPU, &value);
	retval |= register_write(dev->core, IVTV_REG_SPU, value & 0xFFFFFFFE);
	msleep(1);

	retval |= register_read(dev->core, IVTV_REG_VPU, &value);
	retval |= register_write(dev->core, IVTV_REG_VPU, value & 0xFFFFFFE8);

	if (retval < 0)
		dprintk(0, "Error with register_write\n");
	return 0;
}

/**
 Settings used by the windows tv app for PVR2000:
=================================================================================================================
Profile | Codec | Resolution | CBR/VBR | Video Qlty   | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. Mode
-----------------------------------------------------------------------------------------------------------------
MPEG-1  | MPEG1 | 352x288PAL | (CBR)   | 1000:Optimal | 2000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
MPEG-2  | MPEG2 | 720x576PAL | VBR     | 600 :Good    | 4000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
VCD     | MPEG1 | 352x288PAL | (CBR)   | 1000:Optimal | 1150 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
DVD     | MPEG2 | 720x576PAL | VBR     | 600 :Good    | 6000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
DB* DVD | MPEG2 | 720x576PAL | CBR     | 600 :Good    | 6000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
=================================================================================================================
*DB: "DirectBurn"
*/

/* Push the current frame size and norm (50/60 Hz) into the encoder
 * via the cx2341x helper. */
static void blackbird_codec_settings(struct cx8802_dev *dev)
{
	/* assign frame size */
	blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
				dev->height, dev->width);

	dev->params.width = dev->width;
	dev->params.height = dev->height;
	dev->params.is_50hz = (dev->core->tvnorm & V4L2_STD_625_50) != 0;

	cx2341x_update(dev, blackbird_mbox_func, NULL, &dev->params);
}

/* NOTE(review): function truncated at the end of this chunk; the
 * remainder lives outside the visible source. */
static int blackbird_initialize_codec(struct cx8802_dev *dev)
{
	struct cx88_core *core = dev->core;
	int version;
	int retval;

	dprintk(1,"Initialize codec\n");
	retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
	if (retval < 0) {

		dev->mpeg_active = 0;

		/* ping was not successful, reset and upload firmware */
		cx_write(MO_SRST_IO, 0); /* SYS_RSTO=0 */
		cx_write(MO_SRST_IO, 1); /* SYS_RSTO=1 */
		retval = blackbird_load_firmware(dev);
		if (retval < 0)
			return retval;

		retval = blackbird_find_mailbox(dev);
		if (retval < 0)
			return -1;

		dev->mailbox = retval;

		retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
		if (retval < 0) {
			dprintk(0, "ERROR: Firmware ping failed!\n");
			return -1;
		}

		retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1, &version);
		if (retval < 0) {
			dprintk(0, "ERROR: Firmware get encoder version failed!\n");
			return -1;
} dprintk(0, "Firmware version is 0x%08x\n", version); } cx_write(MO_PINMUX_IO, 0x88); /* 656-8bit IO and enable MPEG parallel IO */ cx_clear(MO_INPUT_FORMAT, 0x100); /* chroma subcarrier lock to normal? */ cx_write(MO_VBOS_CONTROL, 0x84A00); /* no 656 mode, 8-bit pixels, disable VBI */ cx_clear(MO_OUTPUT_FORMAT, 0x0008); /* Normal Y-limits to let the mpeg encoder sync */ blackbird_codec_settings(dev); blackbird_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0, BLACKBIRD_FIELD1_SAA7115, BLACKBIRD_FIELD2_SAA7115 ); blackbird_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0, BLACKBIRD_CUSTOM_EXTENSION_USR_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); return 0; } static int blackbird_start_codec(struct file *file, void *priv) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx88_core *core = dev->core; /* start capturing to the host interface */ u32 reg; int i; int lastchange = -1; int lastval = 0; for (i = 0; (i < 10) && (i < (lastchange + 4)); i++) { reg = cx_read(AUD_STATUS); dprintk(1, "AUD_STATUS:%dL: 0x%x\n", i, reg); if ((reg & 0x0F) != lastval) { lastval = reg & 0x0F; lastchange = i; } msleep(100); } /* unmute audio source */ cx_clear(AUD_VOL_CTL, (1 << 6)); blackbird_api_cmd(dev, CX2341X_ENC_REFRESH_INPUT, 0, 0); /* initialize the video input */ blackbird_api_cmd(dev, CX2341X_ENC_INITIALIZE_INPUT, 0, 0); /* start capturing to the host interface */ blackbird_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0, BLACKBIRD_MPEG_CAPTURE, BLACKBIRD_RAW_BITS_NONE ); dev->mpeg_active = 1; return 0; } static int blackbird_stop_codec(struct cx8802_dev *dev) { blackbird_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0, BLACKBIRD_END_NOW, BLACKBIRD_MPEG_CAPTURE, BLACKBIRD_RAW_BITS_NONE ); dev->mpeg_active = 0; return 0; } /* ------------------------------------------------------------------ */ static int bb_buf_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { struct cx8802_fh *fh = q->priv_data; fh->dev->ts_packet_size = 188 * 4; /* was: 512 */ 
fh->dev->ts_packet_count = mpegbufs; /* was: 100 */ *size = fh->dev->ts_packet_size * fh->dev->ts_packet_count; *count = fh->dev->ts_packet_count; return 0; } static int bb_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field field) { struct cx8802_fh *fh = q->priv_data; return cx8802_buf_prepare(q, fh->dev, (struct cx88_buffer*)vb, field); } static void bb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct cx8802_fh *fh = q->priv_data; cx8802_buf_queue(fh->dev, (struct cx88_buffer*)vb); } static void bb_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { cx88_free_buffer(q, (struct cx88_buffer*)vb); } static struct videobuf_queue_ops blackbird_qops = { .buf_setup = bb_buf_setup, .buf_prepare = bb_buf_prepare, .buf_queue = bb_buf_queue, .buf_release = bb_buf_release, }; /* ------------------------------------------------------------------ */ static const u32 *ctrl_classes[] = { cx88_user_ctrls, cx2341x_mpeg_ctrls, NULL }; static int blackbird_queryctrl(struct cx8802_dev *dev, struct v4l2_queryctrl *qctrl) { qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id); if (qctrl->id == 0) return -EINVAL; /* Standard V4L2 controls */ if (cx8800_ctrl_query(dev->core, qctrl) == 0) return 0; /* MPEG V4L2 controls */ if (cx2341x_ctrl_query(&dev->params, qctrl)) qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; return 0; } /* ------------------------------------------------------------------ */ /* IOCTL Handlers */ static int vidioc_querymenu (struct file *file, void *priv, struct v4l2_querymenu *qmenu) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct v4l2_queryctrl qctrl; qctrl.id = qmenu->id; blackbird_queryctrl(dev, &qctrl); return v4l2_ctrl_query_menu(qmenu, &qctrl, cx2341x_ctrl_get_menu(&dev->params, qmenu->id)); } static int vidioc_querycap (struct file *file, void *priv, struct v4l2_capability *cap) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx88_core *core = dev->core; 
strcpy(cap->driver, "cx88_blackbird"); strlcpy(cap->card, core->board.name, sizeof(cap->card)); sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; if (UNSET != core->board.tuner_type) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index != 0) return -EINVAL; strlcpy(f->description, "MPEG", sizeof(f->description)); f->pixelformat = V4L2_PIX_FMT_MPEG; return 0; } static int vidioc_g_fmt_vid_cap (struct file *file, void *priv, struct v4l2_format *f) { struct cx8802_fh *fh = priv; struct cx8802_dev *dev = fh->dev; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */ f->fmt.pix.colorspace = 0; f->fmt.pix.width = dev->width; f->fmt.pix.height = dev->height; f->fmt.pix.field = fh->mpegq.field; dprintk(0,"VIDIOC_G_FMT: w: %d, h: %d, f: %d\n", dev->width, dev->height, fh->mpegq.field ); return 0; } static int vidioc_try_fmt_vid_cap (struct file *file, void *priv, struct v4l2_format *f) { struct cx8802_fh *fh = priv; struct cx8802_dev *dev = fh->dev; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */; f->fmt.pix.colorspace = 0; dprintk(0,"VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n", dev->width, dev->height, fh->mpegq.field ); return 0; } static int vidioc_s_fmt_vid_cap (struct file *file, void *priv, struct v4l2_format *f) { struct cx8802_fh *fh = priv; struct cx8802_dev *dev = fh->dev; struct cx88_core *core = dev->core; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */; f->fmt.pix.colorspace = 0; dev->width = f->fmt.pix.width; dev->height = f->fmt.pix.height; 
fh->mpegq.field = f->fmt.pix.field; cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field); blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0, f->fmt.pix.height, f->fmt.pix.width); dprintk(0,"VIDIOC_S_FMT: w: %d, h: %d, f: %d\n", f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field ); return 0; } static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct cx8802_fh *fh = priv; return (videobuf_reqbufs(&fh->mpegq, p)); } static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p) { struct cx8802_fh *fh = priv; return (videobuf_querybuf(&fh->mpegq, p)); } static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p) { struct cx8802_fh *fh = priv; return (videobuf_qbuf(&fh->mpegq, p)); } static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p) { struct cx8802_fh *fh = priv; return (videobuf_dqbuf(&fh->mpegq, p, file->f_flags & O_NONBLOCK)); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx8802_fh *fh = priv; return videobuf_streamon(&fh->mpegq); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx8802_fh *fh = priv; return videobuf_streamoff(&fh->mpegq); } static int vidioc_g_ext_ctrls (struct file *file, void *priv, struct v4l2_ext_controls *f) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG) return -EINVAL; return cx2341x_ext_ctrls(&dev->params, 0, f, VIDIOC_G_EXT_CTRLS); } static int vidioc_s_ext_ctrls (struct file *file, void *priv, struct v4l2_ext_controls *f) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx2341x_mpeg_params p; int err; if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG) return -EINVAL; if (dev->mpeg_active) blackbird_stop_codec(dev); p = dev->params; err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_S_EXT_CTRLS); if (!err) { err = cx2341x_update(dev, blackbird_mbox_func, 
&dev->params, &p); dev->params = p; } return err; } static int vidioc_try_ext_ctrls (struct file *file, void *priv, struct v4l2_ext_controls *f) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx2341x_mpeg_params p; int err; if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG) return -EINVAL; p = dev->params; err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS); return err; } static int vidioc_s_frequency (struct file *file, void *priv, struct v4l2_frequency *f) { struct cx8802_fh *fh = priv; struct cx8802_dev *dev = fh->dev; struct cx88_core *core = dev->core; if (dev->mpeg_active) blackbird_stop_codec(dev); cx88_set_freq (core,f); blackbird_initialize_codec(dev); cx88_set_scale(dev->core, dev->width, dev->height, fh->mpegq.field); return 0; } static int vidioc_log_status (struct file *file, void *priv) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx88_core *core = dev->core; char name[32 + 2]; snprintf(name, sizeof(name), "%s/2", core->name); printk("%s/2: ============ START LOG STATUS ============\n", core->name); call_all(core, core, log_status); cx2341x_log_status(&dev->params, name); printk("%s/2: ============= END LOG STATUS =============\n", core->name); return 0; } static int vidioc_queryctrl (struct file *file, void *priv, struct v4l2_queryctrl *qctrl) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; if (blackbird_queryctrl(dev, qctrl) == 0) return 0; qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id); if (unlikely(qctrl->id == 0)) return -EINVAL; return cx8800_ctrl_query(dev->core, qctrl); } static int vidioc_enum_input (struct file *file, void *priv, struct v4l2_input *i) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; return cx88_enum_input (core,i); } static int vidioc_g_ctrl (struct file *file, void *priv, struct v4l2_control *ctl) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; return cx88_get_control(core,ctl); } static int vidioc_s_ctrl (struct file *file, void 
*priv, struct v4l2_control *ctl) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; return cx88_set_control(core,ctl); } static int vidioc_g_frequency (struct file *file, void *priv, struct v4l2_frequency *f) { struct cx8802_fh *fh = priv; struct cx88_core *core = fh->dev->core; if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; f->type = V4L2_TUNER_ANALOG_TV; f->frequency = core->freq; call_all(core, tuner, g_frequency, f); return 0; } static int vidioc_g_input (struct file *file, void *priv, unsigned int *i) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; *i = core->input; return 0; } static int vidioc_s_input (struct file *file, void *priv, unsigned int i) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; if (i >= 4) return -EINVAL; mutex_lock(&core->lock); cx88_newstation(core); cx88_video_mux(core,i); mutex_unlock(&core->lock); return 0; } static int vidioc_g_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; u32 reg; if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; if (0 != t->index) return -EINVAL; strcpy(t->name, "Television"); t->type = V4L2_TUNER_ANALOG_TV; t->capability = V4L2_TUNER_CAP_NORM; t->rangehigh = 0xffffffffUL; cx88_get_stereo(core ,t); reg = cx_read(MO_DEVICE_STATUS); t->signal = (reg & (1<<5)) ? 
0xffff : 0x0000; return 0; } static int vidioc_s_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; if (UNSET == core->board.tuner_type) return -EINVAL; if (0 != t->index) return -EINVAL; cx88_set_stereo(core, t->audmode, 1); return 0; } static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *id) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; mutex_lock(&core->lock); cx88_set_tvnorm(core,*id); mutex_unlock(&core->lock); return 0; } /* FIXME: cx88_ioctl_hook not implemented */ static int mpeg_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct cx8802_dev *dev = video_drvdata(file); struct cx8802_fh *fh; struct cx8802_driver *drv = NULL; int err; dprintk( 1, "%s\n", __func__); mutex_lock(&dev->core->lock); /* Make sure we can acquire the hardware */ drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD); if (!drv) { dprintk(1, "%s: blackbird driver is not loaded\n", __func__); mutex_unlock(&dev->core->lock); return -ENODEV; } err = drv->request_acquire(drv); if (err != 0) { dprintk(1,"%s: Unable to acquire hardware, %d\n", __func__, err); mutex_unlock(&dev->core->lock); return err; } if (!dev->core->mpeg_users && blackbird_initialize_codec(dev) < 0) { drv->request_release(drv); mutex_unlock(&dev->core->lock); return -EINVAL; } dprintk(1, "open dev=%s\n", video_device_node_name(vdev)); /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh),GFP_KERNEL); if (NULL == fh) { drv->request_release(drv); mutex_unlock(&dev->core->lock); return -ENOMEM; } file->private_data = fh; fh->dev = dev; videobuf_queue_sg_init(&fh->mpegq, &blackbird_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct cx88_buffer), fh, NULL); /* FIXME: locking against other video device */ cx88_set_scale(dev->core, dev->width, dev->height, fh->mpegq.field); dev->core->mpeg_users++; 
mutex_unlock(&dev->core->lock); return 0; } static int mpeg_release(struct file *file) { struct cx8802_fh *fh = file->private_data; struct cx8802_dev *dev = fh->dev; struct cx8802_driver *drv = NULL; mutex_lock(&dev->core->lock); if (dev->mpeg_active && dev->core->mpeg_users == 1) blackbird_stop_codec(dev); cx8802_cancel_buffers(fh->dev); /* stop mpeg capture */ videobuf_stop(&fh->mpegq); videobuf_mmap_free(&fh->mpegq); file->private_data = NULL; kfree(fh); /* Make sure we release the hardware */ drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD); WARN_ON(!drv); if (drv) drv->request_release(drv); dev->core->mpeg_users--; mutex_unlock(&dev->core->lock); return 0; } static ssize_t mpeg_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct cx8802_fh *fh = file->private_data; struct cx8802_dev *dev = fh->dev; if (!dev->mpeg_active) blackbird_start_codec(file, fh); return videobuf_read_stream(&fh->mpegq, data, count, ppos, 0, file->f_flags & O_NONBLOCK); } static unsigned int mpeg_poll(struct file *file, struct poll_table_struct *wait) { struct cx8802_fh *fh = file->private_data; struct cx8802_dev *dev = fh->dev; if (!dev->mpeg_active) blackbird_start_codec(file, fh); return videobuf_poll_stream(file, &fh->mpegq, wait); } static int mpeg_mmap(struct file *file, struct vm_area_struct * vma) { struct cx8802_fh *fh = file->private_data; return videobuf_mmap_mapper(&fh->mpegq, vma); } static const struct v4l2_file_operations mpeg_fops = { .owner = THIS_MODULE, .open = mpeg_open, .release = mpeg_release, .read = mpeg_read, .poll = mpeg_poll, .mmap = mpeg_mmap, .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { .vidioc_querymenu = vidioc_querymenu, .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_reqbufs = vidioc_reqbufs, 
.vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls, .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls, .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_log_status = vidioc_log_status, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_s_std = vidioc_s_std, }; static struct video_device cx8802_mpeg_template = { .name = "cx8802", .fops = &mpeg_fops, .ioctl_ops = &mpeg_ioctl_ops, .tvnorms = CX88_NORMS, .current_norm = V4L2_STD_NTSC_M, }; /* ------------------------------------------------------------------ */ /* The CX8802 MPEG API will call this when we can use the hardware */ static int cx8802_blackbird_advise_acquire(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; int err = 0; switch (core->boardnr) { case CX88_BOARD_HAUPPAUGE_HVR1300: /* By default, core setup will leave the cx22702 out of reset, on the bus. * We left the hardware on power up with the cx22702 active. * We're being given access to re-arrange the GPIOs. * Take the bus off the cx22702 and put the cx23416 on it. 
*/ /* Toggle reset on cx22702 leaving i2c active */ cx_set(MO_GP0_IO, 0x00000080); udelay(1000); cx_clear(MO_GP0_IO, 0x00000080); udelay(50); cx_set(MO_GP0_IO, 0x00000080); udelay(1000); /* tri-state the cx22702 pins */ cx_set(MO_GP0_IO, 0x00000004); udelay(1000); break; default: err = -ENODEV; } return err; } /* The CX8802 MPEG API will call this when we need to release the hardware */ static int cx8802_blackbird_advise_release(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; int err = 0; switch (core->boardnr) { case CX88_BOARD_HAUPPAUGE_HVR1300: /* Exit leaving the cx23416 on the bus */ break; default: err = -ENODEV; } return err; } static void blackbird_unregister_video(struct cx8802_dev *dev) { if (dev->mpeg_dev) { if (video_is_registered(dev->mpeg_dev)) video_unregister_device(dev->mpeg_dev); else video_device_release(dev->mpeg_dev); dev->mpeg_dev = NULL; } } static int blackbird_register_video(struct cx8802_dev *dev) { int err; dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci, &cx8802_mpeg_template,"mpeg"); video_set_drvdata(dev->mpeg_dev, dev); err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1); if (err < 0) { printk(KERN_INFO "%s/2: can't register mpeg device\n", dev->core->name); return err; } printk(KERN_INFO "%s/2: registered device %s [mpeg]\n", dev->core->name, video_device_node_name(dev->mpeg_dev)); return 0; } /* ----------------------------------------------------------- */ static int cx8802_blackbird_probe(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; struct cx8802_dev *dev = core->dvbdev; int err; dprintk( 1, "%s\n", __func__); dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n", core->boardnr, core->name, core->pci_bus, core->pci_slot); err = -ENODEV; if (!(core->board.mpeg & CX88_MPEG_BLACKBIRD)) goto fail_core; dev->width = 720; dev->height = 576; cx2341x_fill_defaults(&dev->params); dev->params.port = CX2341X_PORT_STREAMING; cx8802_mpeg_template.current_norm = core->tvnorm; if 
(core->tvnorm & V4L2_STD_525_60) { dev->height = 480; } else { dev->height = 576; } /* blackbird stuff */ printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n", core->name); host_setup(dev->core); blackbird_initialize_codec(dev); blackbird_register_video(dev); /* initial device configuration: needed ? */ // init_controls(core); cx88_set_tvnorm(core,core->tvnorm); cx88_video_mux(core,0); return 0; fail_core: return err; } static int cx8802_blackbird_remove(struct cx8802_driver *drv) { /* blackbird */ blackbird_unregister_video(drv->core->dvbdev); return 0; } static struct cx8802_driver cx8802_blackbird_driver = { .type_id = CX88_MPEG_BLACKBIRD, .hw_access = CX8802_DRVCTL_SHARED, .probe = cx8802_blackbird_probe, .remove = cx8802_blackbird_remove, .advise_acquire = cx8802_blackbird_advise_acquire, .advise_release = cx8802_blackbird_advise_release, }; static int __init blackbird_init(void) { printk(KERN_INFO "cx2388x blackbird driver version %s loaded\n", CX88_VERSION); return cx8802_register_driver(&cx8802_blackbird_driver); } static void __exit blackbird_fini(void) { cx8802_unregister_driver(&cx8802_blackbird_driver); } module_init(blackbird_init); module_exit(blackbird_fini); module_param_named(video_debug,cx8802_mpeg_template.debug, int, 0644); MODULE_PARM_DESC(debug,"enable debug messages [video]");
gpl-2.0
chaosagent/DNA_JB_Kernel
drivers/media/video/cx88/cx88-blackbird.c
5468
38576
/* * * Support for a cx23416 mpeg encoder via cx2388x host port. * "blackbird" reference design. * * (c) 2004 Jelle Foks <jelle@foks.us> * (c) 2004 Gerd Knorr <kraxel@bytesex.org> * * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org> * - video_ioctl2 conversion * * Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/cx2341x.h> #include "cx88.h" MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards"); MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); MODULE_LICENSE("GPL"); MODULE_VERSION(CX88_VERSION); static unsigned int mpegbufs = 32; module_param(mpegbufs,int,0644); MODULE_PARM_DESC(mpegbufs,"number of mpeg buffers, range 2-32"); static unsigned int debug; module_param(debug,int,0644); MODULE_PARM_DESC(debug,"enable debug messages [blackbird]"); #define dprintk(level,fmt, arg...) 
if (debug >= level) \ printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg) /* ------------------------------------------------------------------ */ #define BLACKBIRD_FIRM_IMAGE_SIZE 376836 /* defines below are from ivtv-driver.h */ #define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF /* Firmware API commands */ #define IVTV_API_STD_TIMEOUT 500 enum blackbird_capture_type { BLACKBIRD_MPEG_CAPTURE, BLACKBIRD_RAW_CAPTURE, BLACKBIRD_RAW_PASSTHRU_CAPTURE }; enum blackbird_capture_bits { BLACKBIRD_RAW_BITS_NONE = 0x00, BLACKBIRD_RAW_BITS_YUV_CAPTURE = 0x01, BLACKBIRD_RAW_BITS_PCM_CAPTURE = 0x02, BLACKBIRD_RAW_BITS_VBI_CAPTURE = 0x04, BLACKBIRD_RAW_BITS_PASSTHRU_CAPTURE = 0x08, BLACKBIRD_RAW_BITS_TO_HOST_CAPTURE = 0x10 }; enum blackbird_capture_end { BLACKBIRD_END_AT_GOP, /* stop at the end of gop, generate irq */ BLACKBIRD_END_NOW, /* stop immediately, no irq */ }; enum blackbird_framerate { BLACKBIRD_FRAMERATE_NTSC_30, /* NTSC: 30fps */ BLACKBIRD_FRAMERATE_PAL_25 /* PAL: 25fps */ }; enum blackbird_stream_port { BLACKBIRD_OUTPUT_PORT_MEMORY, BLACKBIRD_OUTPUT_PORT_STREAMING, BLACKBIRD_OUTPUT_PORT_SERIAL }; enum blackbird_data_xfer_status { BLACKBIRD_MORE_BUFFERS_FOLLOW, BLACKBIRD_LAST_BUFFER, }; enum blackbird_picture_mask { BLACKBIRD_PICTURE_MASK_NONE, BLACKBIRD_PICTURE_MASK_I_FRAMES, BLACKBIRD_PICTURE_MASK_I_P_FRAMES = 0x3, BLACKBIRD_PICTURE_MASK_ALL_FRAMES = 0x7, }; enum blackbird_vbi_mode_bits { BLACKBIRD_VBI_BITS_SLICED, BLACKBIRD_VBI_BITS_RAW, }; enum blackbird_vbi_insertion_bits { BLACKBIRD_VBI_BITS_INSERT_IN_XTENSION_USR_DATA, BLACKBIRD_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1, BLACKBIRD_VBI_BITS_SEPARATE_STREAM = 0x2 << 1, BLACKBIRD_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1, BLACKBIRD_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1, }; enum blackbird_dma_unit { BLACKBIRD_DMA_BYTES, BLACKBIRD_DMA_FRAMES, }; enum blackbird_dma_transfer_status_bits { BLACKBIRD_DMA_TRANSFER_BITS_DONE = 0x01, BLACKBIRD_DMA_TRANSFER_BITS_ERROR = 0x04, 
BLACKBIRD_DMA_TRANSFER_BITS_LL_ERROR = 0x10, }; enum blackbird_pause { BLACKBIRD_PAUSE_ENCODING, BLACKBIRD_RESUME_ENCODING, }; enum blackbird_copyright { BLACKBIRD_COPYRIGHT_OFF, BLACKBIRD_COPYRIGHT_ON, }; enum blackbird_notification_type { BLACKBIRD_NOTIFICATION_REFRESH, }; enum blackbird_notification_status { BLACKBIRD_NOTIFICATION_OFF, BLACKBIRD_NOTIFICATION_ON, }; enum blackbird_notification_mailbox { BLACKBIRD_NOTIFICATION_NO_MAILBOX = -1, }; enum blackbird_field1_lines { BLACKBIRD_FIELD1_SAA7114 = 0x00EF, /* 239 */ BLACKBIRD_FIELD1_SAA7115 = 0x00F0, /* 240 */ BLACKBIRD_FIELD1_MICRONAS = 0x0105, /* 261 */ }; enum blackbird_field2_lines { BLACKBIRD_FIELD2_SAA7114 = 0x00EF, /* 239 */ BLACKBIRD_FIELD2_SAA7115 = 0x00F0, /* 240 */ BLACKBIRD_FIELD2_MICRONAS = 0x0106, /* 262 */ }; enum blackbird_custom_data_type { BLACKBIRD_CUSTOM_EXTENSION_USR_DATA, BLACKBIRD_CUSTOM_PRIVATE_PACKET, }; enum blackbird_mute { BLACKBIRD_UNMUTE, BLACKBIRD_MUTE, }; enum blackbird_mute_video_mask { BLACKBIRD_MUTE_VIDEO_V_MASK = 0x0000FF00, BLACKBIRD_MUTE_VIDEO_U_MASK = 0x00FF0000, BLACKBIRD_MUTE_VIDEO_Y_MASK = 0xFF000000, }; enum blackbird_mute_video_shift { BLACKBIRD_MUTE_VIDEO_V_SHIFT = 8, BLACKBIRD_MUTE_VIDEO_U_SHIFT = 16, BLACKBIRD_MUTE_VIDEO_Y_SHIFT = 24, }; /* Registers */ #define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8 /*| IVTV_REG_OFFSET*/) #define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC /*| IVTV_REG_OFFSET*/) #define IVTV_REG_SPU (0x9050 /*| IVTV_REG_OFFSET*/) #define IVTV_REG_HW_BLOCKS (0x9054 /*| IVTV_REG_OFFSET*/) #define IVTV_REG_VPU (0x9058 /*| IVTV_REG_OFFSET*/) #define IVTV_REG_APU (0xA064 /*| IVTV_REG_OFFSET*/) /* ------------------------------------------------------------------ */ static void host_setup(struct cx88_core *core) { /* toggle reset of the host */ cx_write(MO_GPHST_SOFT_RST, 1); udelay(100); cx_write(MO_GPHST_SOFT_RST, 0); udelay(100); /* host port setup */ cx_write(MO_GPHST_WSC, 0x44444444U); cx_write(MO_GPHST_XFR, 0); cx_write(MO_GPHST_WDTH, 15); 
cx_write(MO_GPHST_HDSHK, 0); cx_write(MO_GPHST_MUX16, 0x44448888U); cx_write(MO_GPHST_MODE, 0); } /* ------------------------------------------------------------------ */ #define P1_MDATA0 0x390000 #define P1_MDATA1 0x390001 #define P1_MDATA2 0x390002 #define P1_MDATA3 0x390003 #define P1_MADDR2 0x390004 #define P1_MADDR1 0x390005 #define P1_MADDR0 0x390006 #define P1_RDATA0 0x390008 #define P1_RDATA1 0x390009 #define P1_RDATA2 0x39000A #define P1_RDATA3 0x39000B #define P1_RADDR0 0x39000C #define P1_RADDR1 0x39000D #define P1_RRDWR 0x39000E static int wait_ready_gpio0_bit1(struct cx88_core *core, u32 state) { unsigned long timeout = jiffies + msecs_to_jiffies(1); u32 gpio0,need; need = state ? 2 : 0; for (;;) { gpio0 = cx_read(MO_GP0_IO) & 2; if (need == gpio0) return 0; if (time_after(jiffies,timeout)) return -1; udelay(1); } } static int memory_write(struct cx88_core *core, u32 address, u32 value) { /* Warning: address is dword address (4 bytes) */ cx_writeb(P1_MDATA0, (unsigned int)value); cx_writeb(P1_MDATA1, (unsigned int)(value >> 8)); cx_writeb(P1_MDATA2, (unsigned int)(value >> 16)); cx_writeb(P1_MDATA3, (unsigned int)(value >> 24)); cx_writeb(P1_MADDR2, (unsigned int)(address >> 16) | 0x40); cx_writeb(P1_MADDR1, (unsigned int)(address >> 8)); cx_writeb(P1_MADDR0, (unsigned int)address); cx_read(P1_MDATA0); cx_read(P1_MADDR0); return wait_ready_gpio0_bit1(core,1); } static int memory_read(struct cx88_core *core, u32 address, u32 *value) { int retval; u32 val; /* Warning: address is dword address (4 bytes) */ cx_writeb(P1_MADDR2, (unsigned int)(address >> 16) & ~0xC0); cx_writeb(P1_MADDR1, (unsigned int)(address >> 8)); cx_writeb(P1_MADDR0, (unsigned int)address); cx_read(P1_MADDR0); retval = wait_ready_gpio0_bit1(core,1); cx_writeb(P1_MDATA3, 0); val = (unsigned char)cx_read(P1_MDATA3) << 24; cx_writeb(P1_MDATA2, 0); val |= (unsigned char)cx_read(P1_MDATA2) << 16; cx_writeb(P1_MDATA1, 0); val |= (unsigned char)cx_read(P1_MDATA1) << 8; cx_writeb(P1_MDATA0, 
0); val |= (unsigned char)cx_read(P1_MDATA0); *value = val; return retval; } static int register_write(struct cx88_core *core, u32 address, u32 value) { cx_writeb(P1_RDATA0, (unsigned int)value); cx_writeb(P1_RDATA1, (unsigned int)(value >> 8)); cx_writeb(P1_RDATA2, (unsigned int)(value >> 16)); cx_writeb(P1_RDATA3, (unsigned int)(value >> 24)); cx_writeb(P1_RADDR0, (unsigned int)address); cx_writeb(P1_RADDR1, (unsigned int)(address >> 8)); cx_writeb(P1_RRDWR, 1); cx_read(P1_RDATA0); cx_read(P1_RADDR0); return wait_ready_gpio0_bit1(core,1); } static int register_read(struct cx88_core *core, u32 address, u32 *value) { int retval; u32 val; cx_writeb(P1_RADDR0, (unsigned int)address); cx_writeb(P1_RADDR1, (unsigned int)(address >> 8)); cx_writeb(P1_RRDWR, 0); cx_read(P1_RADDR0); retval = wait_ready_gpio0_bit1(core,1); val = (unsigned char)cx_read(P1_RDATA0); val |= (unsigned char)cx_read(P1_RDATA1) << 8; val |= (unsigned char)cx_read(P1_RDATA2) << 16; val |= (unsigned char)cx_read(P1_RDATA3) << 24; *value = val; return retval; } /* ------------------------------------------------------------------ */ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA]) { struct cx8802_dev *dev = priv; unsigned long timeout; u32 value, flag, retval; int i; dprintk(1,"%s: 0x%X\n", __func__, command); /* this may not be 100% safe if we can't read any memory location without side effects */ memory_read(dev->core, dev->mailbox - 4, &value); if (value != 0x12345678) { dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n"); return -1; } memory_read(dev->core, dev->mailbox, &flag); if (flag) { dprintk(0, "ERROR: Mailbox appears to be in use (%x)\n", flag); return -1; } flag |= 1; /* tell 'em we're working on it */ memory_write(dev->core, dev->mailbox, flag); /* write command + args + fill remaining with zeros */ memory_write(dev->core, dev->mailbox + 1, command); /* command code */ memory_write(dev->core, dev->mailbox 
+ 3, IVTV_API_STD_TIMEOUT); /* timeout */ for (i = 0; i < in; i++) { memory_write(dev->core, dev->mailbox + 4 + i, data[i]); dprintk(1, "API Input %d = %d\n", i, data[i]); } for (; i < CX2341X_MBOX_MAX_DATA; i++) memory_write(dev->core, dev->mailbox + 4 + i, 0); flag |= 3; /* tell 'em we're done writing */ memory_write(dev->core, dev->mailbox, flag); /* wait for firmware to handle the API command */ timeout = jiffies + msecs_to_jiffies(10); for (;;) { memory_read(dev->core, dev->mailbox, &flag); if (0 != (flag & 4)) break; if (time_after(jiffies,timeout)) { dprintk(0, "ERROR: API Mailbox timeout\n"); return -1; } udelay(10); } /* read output values */ for (i = 0; i < out; i++) { memory_read(dev->core, dev->mailbox + 4 + i, data + i); dprintk(1, "API Output %d = %d\n", i, data[i]); } memory_read(dev->core, dev->mailbox + 2, &retval); dprintk(1, "API result = %d\n",retval); flag = 0; memory_write(dev->core, dev->mailbox, flag); return retval; } /* ------------------------------------------------------------------ */ /* We don't need to call the API often, so using just one mailbox will probably suffice */ static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command, u32 inputcnt, u32 outputcnt, ...) 
{ u32 data[CX2341X_MBOX_MAX_DATA]; va_list vargs; int i, err; va_start(vargs, outputcnt); for (i = 0; i < inputcnt; i++) { data[i] = va_arg(vargs, int); } err = blackbird_mbox_func(dev, command, inputcnt, outputcnt, data); for (i = 0; i < outputcnt; i++) { int *vptr = va_arg(vargs, int *); *vptr = data[i]; } va_end(vargs); return err; } static int blackbird_find_mailbox(struct cx8802_dev *dev) { u32 signature[4]={0x12345678, 0x34567812, 0x56781234, 0x78123456}; int signaturecnt=0; u32 value; int i; for (i = 0; i < BLACKBIRD_FIRM_IMAGE_SIZE; i++) { memory_read(dev->core, i, &value); if (value == signature[signaturecnt]) signaturecnt++; else signaturecnt = 0; if (4 == signaturecnt) { dprintk(1, "Mailbox signature found\n"); return i+1; } } dprintk(0, "Mailbox signature values not found!\n"); return -1; } static int blackbird_load_firmware(struct cx8802_dev *dev) { static const unsigned char magic[8] = { 0xa7, 0x0d, 0x00, 0x00, 0x66, 0xbb, 0x55, 0xaa }; const struct firmware *firmware; int i, retval = 0; u32 value = 0; u32 checksum = 0; u32 *dataptr; retval = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED); retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST); retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH, 0x80000640); retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A); msleep(1); retval |= register_write(dev->core, IVTV_REG_APU, 0); if (retval < 0) dprintk(0, "Error with register_write\n"); retval = request_firmware(&firmware, CX2341X_FIRM_ENC_FILENAME, &dev->pci->dev); if (retval != 0) { dprintk(0, "ERROR: Hotplug firmware request failed (%s).\n", CX2341X_FIRM_ENC_FILENAME); dprintk(0, "Please fix your hotplug setup, the board will " "not work without firmware loaded!\n"); return -1; } if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) { dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n", firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE); release_firmware(firmware); return -1; } if (0 != 
memcmp(firmware->data, magic, 8)) { dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n"); release_firmware(firmware); return -1; } /* transfer to the chip */ dprintk(1,"Loading firmware ...\n"); dataptr = (u32*)firmware->data; for (i = 0; i < (firmware->size >> 2); i++) { value = *dataptr; checksum += ~value; memory_write(dev->core, i, value); dataptr++; } /* read back to verify with the checksum */ for (i--; i >= 0; i--) { memory_read(dev->core, i, &value); checksum -= ~value; } if (checksum) { dprintk(0, "ERROR: Firmware load failed (checksum mismatch).\n"); release_firmware(firmware); return -1; } release_firmware(firmware); dprintk(0, "Firmware upload successful.\n"); retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST); retval |= register_read(dev->core, IVTV_REG_SPU, &value); retval |= register_write(dev->core, IVTV_REG_SPU, value & 0xFFFFFFFE); msleep(1); retval |= register_read(dev->core, IVTV_REG_VPU, &value); retval |= register_write(dev->core, IVTV_REG_VPU, value & 0xFFFFFFE8); if (retval < 0) dprintk(0, "Error with register_write\n"); return 0; } /** Settings used by the windows tv app for PVR2000: ================================================================================================================= Profile | Codec | Resolution | CBR/VBR | Video Qlty | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. 
Mode ----------------------------------------------------------------------------------------------------------------- MPEG-1 | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 2000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo MPEG-2 | MPEG2 | 720x576PAL | VBR | 600 :Good | 4000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo VCD | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 1150 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo DVD | MPEG2 | 720x576PAL | VBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo DB* DVD | MPEG2 | 720x576PAL | CBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo ================================================================================================================= *DB: "DirectBurn" */ static void blackbird_codec_settings(struct cx8802_dev *dev) { /* assign frame size */ blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0, dev->height, dev->width); dev->params.width = dev->width; dev->params.height = dev->height; dev->params.is_50hz = (dev->core->tvnorm & V4L2_STD_625_50) != 0; cx2341x_update(dev, blackbird_mbox_func, NULL, &dev->params); } static int blackbird_initialize_codec(struct cx8802_dev *dev) { struct cx88_core *core = dev->core; int version; int retval; dprintk(1,"Initialize codec\n"); retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */ if (retval < 0) { dev->mpeg_active = 0; /* ping was not successful, reset and upload firmware */ cx_write(MO_SRST_IO, 0); /* SYS_RSTO=0 */ cx_write(MO_SRST_IO, 1); /* SYS_RSTO=1 */ retval = blackbird_load_firmware(dev); if (retval < 0) return retval; retval = blackbird_find_mailbox(dev); if (retval < 0) return -1; dev->mailbox = retval; retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */ if (retval < 0) { dprintk(0, "ERROR: Firmware ping failed!\n"); return -1; } retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1, &version); if (retval < 0) { dprintk(0, "ERROR: Firmware get encoder version failed!\n"); return -1; 
} dprintk(0, "Firmware version is 0x%08x\n", version); } cx_write(MO_PINMUX_IO, 0x88); /* 656-8bit IO and enable MPEG parallel IO */ cx_clear(MO_INPUT_FORMAT, 0x100); /* chroma subcarrier lock to normal? */ cx_write(MO_VBOS_CONTROL, 0x84A00); /* no 656 mode, 8-bit pixels, disable VBI */ cx_clear(MO_OUTPUT_FORMAT, 0x0008); /* Normal Y-limits to let the mpeg encoder sync */ blackbird_codec_settings(dev); blackbird_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0, BLACKBIRD_FIELD1_SAA7115, BLACKBIRD_FIELD2_SAA7115 ); blackbird_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0, BLACKBIRD_CUSTOM_EXTENSION_USR_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); return 0; } static int blackbird_start_codec(struct file *file, void *priv) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx88_core *core = dev->core; /* start capturing to the host interface */ u32 reg; int i; int lastchange = -1; int lastval = 0; for (i = 0; (i < 10) && (i < (lastchange + 4)); i++) { reg = cx_read(AUD_STATUS); dprintk(1, "AUD_STATUS:%dL: 0x%x\n", i, reg); if ((reg & 0x0F) != lastval) { lastval = reg & 0x0F; lastchange = i; } msleep(100); } /* unmute audio source */ cx_clear(AUD_VOL_CTL, (1 << 6)); blackbird_api_cmd(dev, CX2341X_ENC_REFRESH_INPUT, 0, 0); /* initialize the video input */ blackbird_api_cmd(dev, CX2341X_ENC_INITIALIZE_INPUT, 0, 0); /* start capturing to the host interface */ blackbird_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0, BLACKBIRD_MPEG_CAPTURE, BLACKBIRD_RAW_BITS_NONE ); dev->mpeg_active = 1; return 0; } static int blackbird_stop_codec(struct cx8802_dev *dev) { blackbird_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0, BLACKBIRD_END_NOW, BLACKBIRD_MPEG_CAPTURE, BLACKBIRD_RAW_BITS_NONE ); dev->mpeg_active = 0; return 0; } /* ------------------------------------------------------------------ */ static int bb_buf_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { struct cx8802_fh *fh = q->priv_data; fh->dev->ts_packet_size = 188 * 4; /* was: 512 */ 
fh->dev->ts_packet_count = mpegbufs; /* was: 100 */ *size = fh->dev->ts_packet_size * fh->dev->ts_packet_count; *count = fh->dev->ts_packet_count; return 0; } static int bb_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field field) { struct cx8802_fh *fh = q->priv_data; return cx8802_buf_prepare(q, fh->dev, (struct cx88_buffer*)vb, field); } static void bb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb) { struct cx8802_fh *fh = q->priv_data; cx8802_buf_queue(fh->dev, (struct cx88_buffer*)vb); } static void bb_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb) { cx88_free_buffer(q, (struct cx88_buffer*)vb); } static struct videobuf_queue_ops blackbird_qops = { .buf_setup = bb_buf_setup, .buf_prepare = bb_buf_prepare, .buf_queue = bb_buf_queue, .buf_release = bb_buf_release, }; /* ------------------------------------------------------------------ */ static const u32 *ctrl_classes[] = { cx88_user_ctrls, cx2341x_mpeg_ctrls, NULL }; static int blackbird_queryctrl(struct cx8802_dev *dev, struct v4l2_queryctrl *qctrl) { qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id); if (qctrl->id == 0) return -EINVAL; /* Standard V4L2 controls */ if (cx8800_ctrl_query(dev->core, qctrl) == 0) return 0; /* MPEG V4L2 controls */ if (cx2341x_ctrl_query(&dev->params, qctrl)) qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; return 0; } /* ------------------------------------------------------------------ */ /* IOCTL Handlers */ static int vidioc_querymenu (struct file *file, void *priv, struct v4l2_querymenu *qmenu) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct v4l2_queryctrl qctrl; qctrl.id = qmenu->id; blackbird_queryctrl(dev, &qctrl); return v4l2_ctrl_query_menu(qmenu, &qctrl, cx2341x_ctrl_get_menu(&dev->params, qmenu->id)); } static int vidioc_querycap (struct file *file, void *priv, struct v4l2_capability *cap) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx88_core *core = dev->core; 
strcpy(cap->driver, "cx88_blackbird"); strlcpy(cap->card, core->board.name, sizeof(cap->card)); sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; if (UNSET != core->board.tuner_type) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index != 0) return -EINVAL; strlcpy(f->description, "MPEG", sizeof(f->description)); f->pixelformat = V4L2_PIX_FMT_MPEG; return 0; } static int vidioc_g_fmt_vid_cap (struct file *file, void *priv, struct v4l2_format *f) { struct cx8802_fh *fh = priv; struct cx8802_dev *dev = fh->dev; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */ f->fmt.pix.colorspace = 0; f->fmt.pix.width = dev->width; f->fmt.pix.height = dev->height; f->fmt.pix.field = fh->mpegq.field; dprintk(0,"VIDIOC_G_FMT: w: %d, h: %d, f: %d\n", dev->width, dev->height, fh->mpegq.field ); return 0; } static int vidioc_try_fmt_vid_cap (struct file *file, void *priv, struct v4l2_format *f) { struct cx8802_fh *fh = priv; struct cx8802_dev *dev = fh->dev; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */; f->fmt.pix.colorspace = 0; dprintk(0,"VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n", dev->width, dev->height, fh->mpegq.field ); return 0; } static int vidioc_s_fmt_vid_cap (struct file *file, void *priv, struct v4l2_format *f) { struct cx8802_fh *fh = priv; struct cx8802_dev *dev = fh->dev; struct cx88_core *core = dev->core; f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG; f->fmt.pix.bytesperline = 0; f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */; f->fmt.pix.colorspace = 0; dev->width = f->fmt.pix.width; dev->height = f->fmt.pix.height; 
fh->mpegq.field = f->fmt.pix.field; cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field); blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0, f->fmt.pix.height, f->fmt.pix.width); dprintk(0,"VIDIOC_S_FMT: w: %d, h: %d, f: %d\n", f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field ); return 0; } static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct cx8802_fh *fh = priv; return (videobuf_reqbufs(&fh->mpegq, p)); } static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p) { struct cx8802_fh *fh = priv; return (videobuf_querybuf(&fh->mpegq, p)); } static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p) { struct cx8802_fh *fh = priv; return (videobuf_qbuf(&fh->mpegq, p)); } static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p) { struct cx8802_fh *fh = priv; return (videobuf_dqbuf(&fh->mpegq, p, file->f_flags & O_NONBLOCK)); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx8802_fh *fh = priv; return videobuf_streamon(&fh->mpegq); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx8802_fh *fh = priv; return videobuf_streamoff(&fh->mpegq); } static int vidioc_g_ext_ctrls (struct file *file, void *priv, struct v4l2_ext_controls *f) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG) return -EINVAL; return cx2341x_ext_ctrls(&dev->params, 0, f, VIDIOC_G_EXT_CTRLS); } static int vidioc_s_ext_ctrls (struct file *file, void *priv, struct v4l2_ext_controls *f) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx2341x_mpeg_params p; int err; if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG) return -EINVAL; if (dev->mpeg_active) blackbird_stop_codec(dev); p = dev->params; err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_S_EXT_CTRLS); if (!err) { err = cx2341x_update(dev, blackbird_mbox_func, 
&dev->params, &p); dev->params = p; } return err; } static int vidioc_try_ext_ctrls (struct file *file, void *priv, struct v4l2_ext_controls *f) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx2341x_mpeg_params p; int err; if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG) return -EINVAL; p = dev->params; err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS); return err; } static int vidioc_s_frequency (struct file *file, void *priv, struct v4l2_frequency *f) { struct cx8802_fh *fh = priv; struct cx8802_dev *dev = fh->dev; struct cx88_core *core = dev->core; if (dev->mpeg_active) blackbird_stop_codec(dev); cx88_set_freq (core,f); blackbird_initialize_codec(dev); cx88_set_scale(dev->core, dev->width, dev->height, fh->mpegq.field); return 0; } static int vidioc_log_status (struct file *file, void *priv) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; struct cx88_core *core = dev->core; char name[32 + 2]; snprintf(name, sizeof(name), "%s/2", core->name); printk("%s/2: ============ START LOG STATUS ============\n", core->name); call_all(core, core, log_status); cx2341x_log_status(&dev->params, name); printk("%s/2: ============= END LOG STATUS =============\n", core->name); return 0; } static int vidioc_queryctrl (struct file *file, void *priv, struct v4l2_queryctrl *qctrl) { struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev; if (blackbird_queryctrl(dev, qctrl) == 0) return 0; qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id); if (unlikely(qctrl->id == 0)) return -EINVAL; return cx8800_ctrl_query(dev->core, qctrl); } static int vidioc_enum_input (struct file *file, void *priv, struct v4l2_input *i) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; return cx88_enum_input (core,i); } static int vidioc_g_ctrl (struct file *file, void *priv, struct v4l2_control *ctl) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; return cx88_get_control(core,ctl); } static int vidioc_s_ctrl (struct file *file, void 
*priv, struct v4l2_control *ctl) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; return cx88_set_control(core,ctl); } static int vidioc_g_frequency (struct file *file, void *priv, struct v4l2_frequency *f) { struct cx8802_fh *fh = priv; struct cx88_core *core = fh->dev->core; if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; f->type = V4L2_TUNER_ANALOG_TV; f->frequency = core->freq; call_all(core, tuner, g_frequency, f); return 0; } static int vidioc_g_input (struct file *file, void *priv, unsigned int *i) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; *i = core->input; return 0; } static int vidioc_s_input (struct file *file, void *priv, unsigned int i) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; if (i >= 4) return -EINVAL; mutex_lock(&core->lock); cx88_newstation(core); cx88_video_mux(core,i); mutex_unlock(&core->lock); return 0; } static int vidioc_g_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; u32 reg; if (unlikely(UNSET == core->board.tuner_type)) return -EINVAL; if (0 != t->index) return -EINVAL; strcpy(t->name, "Television"); t->type = V4L2_TUNER_ANALOG_TV; t->capability = V4L2_TUNER_CAP_NORM; t->rangehigh = 0xffffffffUL; cx88_get_stereo(core ,t); reg = cx_read(MO_DEVICE_STATUS); t->signal = (reg & (1<<5)) ? 
0xffff : 0x0000; return 0; } static int vidioc_s_tuner (struct file *file, void *priv, struct v4l2_tuner *t) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; if (UNSET == core->board.tuner_type) return -EINVAL; if (0 != t->index) return -EINVAL; cx88_set_stereo(core, t->audmode, 1); return 0; } static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *id) { struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core; mutex_lock(&core->lock); cx88_set_tvnorm(core,*id); mutex_unlock(&core->lock); return 0; } /* FIXME: cx88_ioctl_hook not implemented */ static int mpeg_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct cx8802_dev *dev = video_drvdata(file); struct cx8802_fh *fh; struct cx8802_driver *drv = NULL; int err; dprintk( 1, "%s\n", __func__); mutex_lock(&dev->core->lock); /* Make sure we can acquire the hardware */ drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD); if (!drv) { dprintk(1, "%s: blackbird driver is not loaded\n", __func__); mutex_unlock(&dev->core->lock); return -ENODEV; } err = drv->request_acquire(drv); if (err != 0) { dprintk(1,"%s: Unable to acquire hardware, %d\n", __func__, err); mutex_unlock(&dev->core->lock); return err; } if (!dev->core->mpeg_users && blackbird_initialize_codec(dev) < 0) { drv->request_release(drv); mutex_unlock(&dev->core->lock); return -EINVAL; } dprintk(1, "open dev=%s\n", video_device_node_name(vdev)); /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh),GFP_KERNEL); if (NULL == fh) { drv->request_release(drv); mutex_unlock(&dev->core->lock); return -ENOMEM; } file->private_data = fh; fh->dev = dev; videobuf_queue_sg_init(&fh->mpegq, &blackbird_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct cx88_buffer), fh, NULL); /* FIXME: locking against other video device */ cx88_set_scale(dev->core, dev->width, dev->height, fh->mpegq.field); dev->core->mpeg_users++; 
mutex_unlock(&dev->core->lock); return 0; } static int mpeg_release(struct file *file) { struct cx8802_fh *fh = file->private_data; struct cx8802_dev *dev = fh->dev; struct cx8802_driver *drv = NULL; mutex_lock(&dev->core->lock); if (dev->mpeg_active && dev->core->mpeg_users == 1) blackbird_stop_codec(dev); cx8802_cancel_buffers(fh->dev); /* stop mpeg capture */ videobuf_stop(&fh->mpegq); videobuf_mmap_free(&fh->mpegq); file->private_data = NULL; kfree(fh); /* Make sure we release the hardware */ drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD); WARN_ON(!drv); if (drv) drv->request_release(drv); dev->core->mpeg_users--; mutex_unlock(&dev->core->lock); return 0; } static ssize_t mpeg_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct cx8802_fh *fh = file->private_data; struct cx8802_dev *dev = fh->dev; if (!dev->mpeg_active) blackbird_start_codec(file, fh); return videobuf_read_stream(&fh->mpegq, data, count, ppos, 0, file->f_flags & O_NONBLOCK); } static unsigned int mpeg_poll(struct file *file, struct poll_table_struct *wait) { struct cx8802_fh *fh = file->private_data; struct cx8802_dev *dev = fh->dev; if (!dev->mpeg_active) blackbird_start_codec(file, fh); return videobuf_poll_stream(file, &fh->mpegq, wait); } static int mpeg_mmap(struct file *file, struct vm_area_struct * vma) { struct cx8802_fh *fh = file->private_data; return videobuf_mmap_mapper(&fh->mpegq, vma); } static const struct v4l2_file_operations mpeg_fops = { .owner = THIS_MODULE, .open = mpeg_open, .release = mpeg_release, .read = mpeg_read, .poll = mpeg_poll, .mmap = mpeg_mmap, .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops mpeg_ioctl_ops = { .vidioc_querymenu = vidioc_querymenu, .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_reqbufs = vidioc_reqbufs, 
.vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls, .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls, .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_log_status = vidioc_log_status, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_s_std = vidioc_s_std, }; static struct video_device cx8802_mpeg_template = { .name = "cx8802", .fops = &mpeg_fops, .ioctl_ops = &mpeg_ioctl_ops, .tvnorms = CX88_NORMS, .current_norm = V4L2_STD_NTSC_M, }; /* ------------------------------------------------------------------ */ /* The CX8802 MPEG API will call this when we can use the hardware */ static int cx8802_blackbird_advise_acquire(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; int err = 0; switch (core->boardnr) { case CX88_BOARD_HAUPPAUGE_HVR1300: /* By default, core setup will leave the cx22702 out of reset, on the bus. * We left the hardware on power up with the cx22702 active. * We're being given access to re-arrange the GPIOs. * Take the bus off the cx22702 and put the cx23416 on it. 
*/ /* Toggle reset on cx22702 leaving i2c active */ cx_set(MO_GP0_IO, 0x00000080); udelay(1000); cx_clear(MO_GP0_IO, 0x00000080); udelay(50); cx_set(MO_GP0_IO, 0x00000080); udelay(1000); /* tri-state the cx22702 pins */ cx_set(MO_GP0_IO, 0x00000004); udelay(1000); break; default: err = -ENODEV; } return err; } /* The CX8802 MPEG API will call this when we need to release the hardware */ static int cx8802_blackbird_advise_release(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; int err = 0; switch (core->boardnr) { case CX88_BOARD_HAUPPAUGE_HVR1300: /* Exit leaving the cx23416 on the bus */ break; default: err = -ENODEV; } return err; } static void blackbird_unregister_video(struct cx8802_dev *dev) { if (dev->mpeg_dev) { if (video_is_registered(dev->mpeg_dev)) video_unregister_device(dev->mpeg_dev); else video_device_release(dev->mpeg_dev); dev->mpeg_dev = NULL; } } static int blackbird_register_video(struct cx8802_dev *dev) { int err; dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci, &cx8802_mpeg_template,"mpeg"); video_set_drvdata(dev->mpeg_dev, dev); err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1); if (err < 0) { printk(KERN_INFO "%s/2: can't register mpeg device\n", dev->core->name); return err; } printk(KERN_INFO "%s/2: registered device %s [mpeg]\n", dev->core->name, video_device_node_name(dev->mpeg_dev)); return 0; } /* ----------------------------------------------------------- */ static int cx8802_blackbird_probe(struct cx8802_driver *drv) { struct cx88_core *core = drv->core; struct cx8802_dev *dev = core->dvbdev; int err; dprintk( 1, "%s\n", __func__); dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n", core->boardnr, core->name, core->pci_bus, core->pci_slot); err = -ENODEV; if (!(core->board.mpeg & CX88_MPEG_BLACKBIRD)) goto fail_core; dev->width = 720; dev->height = 576; cx2341x_fill_defaults(&dev->params); dev->params.port = CX2341X_PORT_STREAMING; cx8802_mpeg_template.current_norm = core->tvnorm; if 
(core->tvnorm & V4L2_STD_525_60) { dev->height = 480; } else { dev->height = 576; } /* blackbird stuff */ printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n", core->name); host_setup(dev->core); blackbird_initialize_codec(dev); blackbird_register_video(dev); /* initial device configuration: needed ? */ // init_controls(core); cx88_set_tvnorm(core,core->tvnorm); cx88_video_mux(core,0); return 0; fail_core: return err; } static int cx8802_blackbird_remove(struct cx8802_driver *drv) { /* blackbird */ blackbird_unregister_video(drv->core->dvbdev); return 0; } static struct cx8802_driver cx8802_blackbird_driver = { .type_id = CX88_MPEG_BLACKBIRD, .hw_access = CX8802_DRVCTL_SHARED, .probe = cx8802_blackbird_probe, .remove = cx8802_blackbird_remove, .advise_acquire = cx8802_blackbird_advise_acquire, .advise_release = cx8802_blackbird_advise_release, }; static int __init blackbird_init(void) { printk(KERN_INFO "cx2388x blackbird driver version %s loaded\n", CX88_VERSION); return cx8802_register_driver(&cx8802_blackbird_driver); } static void __exit blackbird_fini(void) { cx8802_unregister_driver(&cx8802_blackbird_driver); } module_init(blackbird_init); module_exit(blackbird_fini); module_param_named(video_debug,cx8802_mpeg_template.debug, int, 0644); MODULE_PARM_DESC(debug,"enable debug messages [video]");
gpl-2.0
Snuzzo/funky_dna_old
arch/tile/lib/strlen_64.c
7260
1165
/* * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> #undef strlen size_t strlen(const char *s) { /* Get an aligned pointer. */ const uintptr_t s_int = (uintptr_t) s; const uint64_t *p = (const uint64_t *)(s_int & -8); /* Read the first word, but force bytes before the string to be nonzero. * This expression works because we know shift counts are taken mod 64. */ uint64_t v = *p | ((1ULL << (s_int << 3)) - 1); uint64_t bits; while ((bits = __insn_v1cmpeqi(v, 0)) == 0) v = *++p; return ((const char *)p) + (__insn_ctz(bits) >> 3) - s; } EXPORT_SYMBOL(strlen);
gpl-2.0
SlimRoms/kernel_samsung_exynos5410
net/xfrm/xfrm_algo.c
7260
13675
/* * xfrm algorithm interface * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pfkeyv2.h> #include <linux/crypto.h> #include <linux/scatterlist.h> #include <net/xfrm.h> #if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE) #include <net/ah.h> #endif #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) #include <net/esp.h> #endif /* * Algorithms supported by IPsec. These entries contain properties which * are used in key negotiation and xfrm processing, and are used to verify * that instantiated crypto transforms have correct parameters for IPsec * purposes. 
*/ static struct xfrm_algo_desc aead_list[] = { { .name = "rfc4106(gcm(aes))", .uinfo = { .aead = { .icv_truncbits = 64, } }, .desc = { .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV8, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, { .name = "rfc4106(gcm(aes))", .uinfo = { .aead = { .icv_truncbits = 96, } }, .desc = { .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV12, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, { .name = "rfc4106(gcm(aes))", .uinfo = { .aead = { .icv_truncbits = 128, } }, .desc = { .sadb_alg_id = SADB_X_EALG_AES_GCM_ICV16, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, { .name = "rfc4309(ccm(aes))", .uinfo = { .aead = { .icv_truncbits = 64, } }, .desc = { .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV8, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, { .name = "rfc4309(ccm(aes))", .uinfo = { .aead = { .icv_truncbits = 96, } }, .desc = { .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV12, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, { .name = "rfc4309(ccm(aes))", .uinfo = { .aead = { .icv_truncbits = 128, } }, .desc = { .sadb_alg_id = SADB_X_EALG_AES_CCM_ICV16, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, { .name = "rfc4543(gcm(aes))", .uinfo = { .aead = { .icv_truncbits = 128, } }, .desc = { .sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, }; static struct xfrm_algo_desc aalg_list[] = { { .name = "digest_null", .uinfo = { .auth = { .icv_truncbits = 0, .icv_fullbits = 0, } }, .desc = { .sadb_alg_id = SADB_X_AALG_NULL, .sadb_alg_ivlen = 0, .sadb_alg_minbits = 0, .sadb_alg_maxbits = 0 } }, { .name = "hmac(md5)", .compat = "md5", .uinfo = { .auth = { .icv_truncbits = 96, .icv_fullbits = 128, } }, .desc = { .sadb_alg_id = SADB_AALG_MD5HMAC, .sadb_alg_ivlen = 0, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 128 } }, { .name = "hmac(sha1)", .compat = 
"sha1", .uinfo = { .auth = { .icv_truncbits = 96, .icv_fullbits = 160, } }, .desc = { .sadb_alg_id = SADB_AALG_SHA1HMAC, .sadb_alg_ivlen = 0, .sadb_alg_minbits = 160, .sadb_alg_maxbits = 160 } }, { .name = "hmac(sha256)", .compat = "sha256", .uinfo = { .auth = { .icv_truncbits = 96, .icv_fullbits = 256, } }, .desc = { .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC, .sadb_alg_ivlen = 0, .sadb_alg_minbits = 256, .sadb_alg_maxbits = 256 } }, { .name = "hmac(sha384)", .uinfo = { .auth = { .icv_truncbits = 192, .icv_fullbits = 384, } }, .desc = { .sadb_alg_id = SADB_X_AALG_SHA2_384HMAC, .sadb_alg_ivlen = 0, .sadb_alg_minbits = 384, .sadb_alg_maxbits = 384 } }, { .name = "hmac(sha512)", .uinfo = { .auth = { .icv_truncbits = 256, .icv_fullbits = 512, } }, .desc = { .sadb_alg_id = SADB_X_AALG_SHA2_512HMAC, .sadb_alg_ivlen = 0, .sadb_alg_minbits = 512, .sadb_alg_maxbits = 512 } }, { .name = "hmac(rmd160)", .compat = "rmd160", .uinfo = { .auth = { .icv_truncbits = 96, .icv_fullbits = 160, } }, .desc = { .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC, .sadb_alg_ivlen = 0, .sadb_alg_minbits = 160, .sadb_alg_maxbits = 160 } }, { .name = "xcbc(aes)", .uinfo = { .auth = { .icv_truncbits = 96, .icv_fullbits = 128, } }, .desc = { .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC, .sadb_alg_ivlen = 0, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 128 } }, }; static struct xfrm_algo_desc ealg_list[] = { { .name = "ecb(cipher_null)", .compat = "cipher_null", .uinfo = { .encr = { .blockbits = 8, .defkeybits = 0, } }, .desc = { .sadb_alg_id = SADB_EALG_NULL, .sadb_alg_ivlen = 0, .sadb_alg_minbits = 0, .sadb_alg_maxbits = 0 } }, { .name = "cbc(des)", .compat = "des", .uinfo = { .encr = { .blockbits = 64, .defkeybits = 64, } }, .desc = { .sadb_alg_id = SADB_EALG_DESCBC, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 64, .sadb_alg_maxbits = 64 } }, { .name = "cbc(des3_ede)", .compat = "des3_ede", .uinfo = { .encr = { .blockbits = 64, .defkeybits = 192, } }, .desc = { .sadb_alg_id = SADB_EALG_3DESCBC, .sadb_alg_ivlen = 
8, .sadb_alg_minbits = 192, .sadb_alg_maxbits = 192 } }, { .name = "cbc(cast5)", .compat = "cast5", .uinfo = { .encr = { .blockbits = 64, .defkeybits = 128, } }, .desc = { .sadb_alg_id = SADB_X_EALG_CASTCBC, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 40, .sadb_alg_maxbits = 128 } }, { .name = "cbc(blowfish)", .compat = "blowfish", .uinfo = { .encr = { .blockbits = 64, .defkeybits = 128, } }, .desc = { .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 40, .sadb_alg_maxbits = 448 } }, { .name = "cbc(aes)", .compat = "aes", .uinfo = { .encr = { .blockbits = 128, .defkeybits = 128, } }, .desc = { .sadb_alg_id = SADB_X_EALG_AESCBC, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, { .name = "cbc(serpent)", .compat = "serpent", .uinfo = { .encr = { .blockbits = 128, .defkeybits = 128, } }, .desc = { .sadb_alg_id = SADB_X_EALG_SERPENTCBC, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256, } }, { .name = "cbc(camellia)", .compat = "camellia", .uinfo = { .encr = { .blockbits = 128, .defkeybits = 128, } }, .desc = { .sadb_alg_id = SADB_X_EALG_CAMELLIACBC, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, { .name = "cbc(twofish)", .compat = "twofish", .uinfo = { .encr = { .blockbits = 128, .defkeybits = 128, } }, .desc = { .sadb_alg_id = SADB_X_EALG_TWOFISHCBC, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 128, .sadb_alg_maxbits = 256 } }, { .name = "rfc3686(ctr(aes))", .uinfo = { .encr = { .blockbits = 128, .defkeybits = 160, /* 128-bit key + 32-bit nonce */ } }, .desc = { .sadb_alg_id = SADB_X_EALG_AESCTR, .sadb_alg_ivlen = 8, .sadb_alg_minbits = 160, .sadb_alg_maxbits = 288 } }, }; static struct xfrm_algo_desc calg_list[] = { { .name = "deflate", .uinfo = { .comp = { .threshold = 90, } }, .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE } }, { .name = "lzs", .uinfo = { .comp = { .threshold = 90, } }, .desc = { .sadb_alg_id = SADB_X_CALG_LZS } }, { .name = "lzjh", .uinfo = { .comp = { 
.threshold = 50, } }, .desc = { .sadb_alg_id = SADB_X_CALG_LZJH } }, }; static inline int aead_entries(void) { return ARRAY_SIZE(aead_list); } static inline int aalg_entries(void) { return ARRAY_SIZE(aalg_list); } static inline int ealg_entries(void) { return ARRAY_SIZE(ealg_list); } static inline int calg_entries(void) { return ARRAY_SIZE(calg_list); } struct xfrm_algo_list { struct xfrm_algo_desc *algs; int entries; u32 type; u32 mask; }; static const struct xfrm_algo_list xfrm_aead_list = { .algs = aead_list, .entries = ARRAY_SIZE(aead_list), .type = CRYPTO_ALG_TYPE_AEAD, .mask = CRYPTO_ALG_TYPE_MASK, }; static const struct xfrm_algo_list xfrm_aalg_list = { .algs = aalg_list, .entries = ARRAY_SIZE(aalg_list), .type = CRYPTO_ALG_TYPE_HASH, .mask = CRYPTO_ALG_TYPE_HASH_MASK, }; static const struct xfrm_algo_list xfrm_ealg_list = { .algs = ealg_list, .entries = ARRAY_SIZE(ealg_list), .type = CRYPTO_ALG_TYPE_BLKCIPHER, .mask = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, }; static const struct xfrm_algo_list xfrm_calg_list = { .algs = calg_list, .entries = ARRAY_SIZE(calg_list), .type = CRYPTO_ALG_TYPE_COMPRESS, .mask = CRYPTO_ALG_TYPE_MASK, }; static struct xfrm_algo_desc *xfrm_find_algo( const struct xfrm_algo_list *algo_list, int match(const struct xfrm_algo_desc *entry, const void *data), const void *data, int probe) { struct xfrm_algo_desc *list = algo_list->algs; int i, status; for (i = 0; i < algo_list->entries; i++) { if (!match(list + i, data)) continue; if (list[i].available) return &list[i]; if (!probe) break; status = crypto_has_alg(list[i].name, algo_list->type, algo_list->mask); if (!status) break; list[i].available = status; return &list[i]; } return NULL; } static int xfrm_alg_id_match(const struct xfrm_algo_desc *entry, const void *data) { return entry->desc.sadb_alg_id == (unsigned long)data; } struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id) { return xfrm_find_algo(&xfrm_aalg_list, xfrm_alg_id_match, (void *)(unsigned long)alg_id, 1); } 
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid); struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id) { return xfrm_find_algo(&xfrm_ealg_list, xfrm_alg_id_match, (void *)(unsigned long)alg_id, 1); } EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid); struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id) { return xfrm_find_algo(&xfrm_calg_list, xfrm_alg_id_match, (void *)(unsigned long)alg_id, 1); } EXPORT_SYMBOL_GPL(xfrm_calg_get_byid); static int xfrm_alg_name_match(const struct xfrm_algo_desc *entry, const void *data) { const char *name = data; return name && (!strcmp(name, entry->name) || (entry->compat && !strcmp(name, entry->compat))); } struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe) { return xfrm_find_algo(&xfrm_aalg_list, xfrm_alg_name_match, name, probe); } EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname); struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe) { return xfrm_find_algo(&xfrm_ealg_list, xfrm_alg_name_match, name, probe); } EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname); struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe) { return xfrm_find_algo(&xfrm_calg_list, xfrm_alg_name_match, name, probe); } EXPORT_SYMBOL_GPL(xfrm_calg_get_byname); struct xfrm_aead_name { const char *name; int icvbits; }; static int xfrm_aead_name_match(const struct xfrm_algo_desc *entry, const void *data) { const struct xfrm_aead_name *aead = data; const char *name = aead->name; return aead->icvbits == entry->uinfo.aead.icv_truncbits && name && !strcmp(name, entry->name); } struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len, int probe) { struct xfrm_aead_name data = { .name = name, .icvbits = icv_len, }; return xfrm_find_algo(&xfrm_aead_list, xfrm_aead_name_match, &data, probe); } EXPORT_SYMBOL_GPL(xfrm_aead_get_byname); struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx) { if (idx >= aalg_entries()) return NULL; return &aalg_list[idx]; } EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx); struct xfrm_algo_desc 
*xfrm_ealg_get_byidx(unsigned int idx) { if (idx >= ealg_entries()) return NULL; return &ealg_list[idx]; } EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx); /* * Probe for the availability of crypto algorithms, and set the available * flag for any algorithms found on the system. This is typically called by * pfkey during userspace SA add, update or register. */ void xfrm_probe_algs(void) { int i, status; BUG_ON(in_softirq()); for (i = 0; i < aalg_entries(); i++) { status = crypto_has_hash(aalg_list[i].name, 0, CRYPTO_ALG_ASYNC); if (aalg_list[i].available != status) aalg_list[i].available = status; } for (i = 0; i < ealg_entries(); i++) { status = crypto_has_blkcipher(ealg_list[i].name, 0, CRYPTO_ALG_ASYNC); if (ealg_list[i].available != status) ealg_list[i].available = status; } for (i = 0; i < calg_entries(); i++) { status = crypto_has_comp(calg_list[i].name, 0, CRYPTO_ALG_ASYNC); if (calg_list[i].available != status) calg_list[i].available = status; } } EXPORT_SYMBOL_GPL(xfrm_probe_algs); int xfrm_count_auth_supported(void) { int i, n; for (i = 0, n = 0; i < aalg_entries(); i++) if (aalg_list[i].available) n++; return n; } EXPORT_SYMBOL_GPL(xfrm_count_auth_supported); int xfrm_count_enc_supported(void) { int i, n; for (i = 0, n = 0; i < ealg_entries(); i++) if (ealg_list[i].available) n++; return n; } EXPORT_SYMBOL_GPL(xfrm_count_enc_supported); #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) { if (tail != skb) { skb->data_len += len; skb->len += len; } return skb_put(tail, len); } EXPORT_SYMBOL_GPL(pskb_put); #endif
gpl-2.0
Talustus/android_kernel_exynos-3.4
arch/tile/kernel/sys.c
7260
3584
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. * * This file contains various random system calls that * have a non-standard calling sequence on the Linux/TILE * platform. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/mempolicy.h> #include <linux/binfmts.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/uaccess.h> #include <linux/signal.h> #include <asm/syscalls.h> #include <asm/pgtable.h> #include <asm/homecache.h> #include <arch/chip.h> SYSCALL_DEFINE0(flush_cache) { homecache_evict(cpumask_of(smp_processor_id())); return 0; } /* * Syscalls that pass 64-bit values on 32-bit systems normally * pass them as (low,high) word packed into the immediately adjacent * registers. If the low word naturally falls on an even register, * our ABI makes it work correctly; if not, we adjust it here. * Handling it here means we don't have to fix uclibc AND glibc AND * any other standard libcs we want to support. 
*/ #if !defined(__tilegx__) || defined(CONFIG_COMPAT) ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count) { return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count); } int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, u32 len_lo, u32 len_hi, int advice) { return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo, ((loff_t)len_hi << 32) | len_lo, advice); } #endif /* 32-bit syscall wrappers */ /* Note: used by the compat code even in 64-bit Linux. */ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off_4k) { #define PAGE_ADJUST (PAGE_SHIFT - 12) if (off_4k & ((1 << PAGE_ADJUST) - 1)) return -EINVAL; return sys_mmap_pgoff(addr, len, prot, flags, fd, off_4k >> PAGE_ADJUST); } #ifdef __tilegx__ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) { if (offset & ((1 << PAGE_SHIFT) - 1)) return -EINVAL; return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); } #endif /* Provide the actual syscall number to call mapping. */ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), #ifndef __tilegx__ /* See comments at the top of the file. */ #define sys_fadvise64_64 sys32_fadvise64_64 #define sys_readahead sys32_readahead #endif /* Call the trampolines to manage pt_regs where necessary. */ #define sys_execve _sys_execve #define sys_sigaltstack _sys_sigaltstack #define sys_rt_sigreturn _sys_rt_sigreturn #define sys_clone _sys_clone #ifndef __tilegx__ #define sys_cmpxchg_badaddr _sys_cmpxchg_badaddr #endif /* * Note that we can't include <linux/unistd.h> here since the header * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well. */ void *sys_call_table[__NR_syscalls] = { [0 ... __NR_syscalls-1] = sys_ni_syscall, #include <asm/unistd.h> };
gpl-2.0
nandra/omap_850_kernel
arch/ppc/syslib/mpc52xx_pci.c
93
6857
/* * PCI code for the Freescale MPC52xx embedded CPU. * * * Maintainer : Sylvain Munaut <tnt@246tNt.com> * * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <asm/pci.h> #include <asm/mpc52xx.h> #include "mpc52xx_pci.h" #include <asm/delay.h> #include <asm/machdep.h> /* This macro is defined to activate the workaround for the bug 435 of the MPC5200 (L25R). With it activated, we don't do any 32 bits configuration access during type-1 cycles */ #define MPC5200_BUG_435_WORKAROUND static int mpc52xx_pci_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { struct pci_controller *hose = bus->sysdata; u32 value; if (ppc_md.pci_exclude_device) if (ppc_md.pci_exclude_device(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; out_be32(hose->cfg_addr, (1 << 31) | ((bus->number - hose->bus_offset) << 16) | (devfn << 8) | (offset & 0xfc)); mb(); #ifdef MPC5200_BUG_435_WORKAROUND if (bus->number != hose->bus_offset) { switch (len) { case 1: value = in_8(((u8 __iomem *)hose->cfg_data) + (offset & 3)); break; case 2: value = in_le16(((u16 __iomem *)hose->cfg_data) + ((offset>>1) & 1)); break; default: value = in_le16((u16 __iomem *)hose->cfg_data) | (in_le16(((u16 __iomem *)hose->cfg_data) + 1) << 16); break; } } else #endif { value = in_le32(hose->cfg_data); if (len != 4) { value >>= ((offset & 0x3) << 3); value &= 0xffffffff >> (32 - (len << 3)); } } *val = value; out_be32(hose->cfg_addr, 0); mb(); return PCIBIOS_SUCCESSFUL; } static int mpc52xx_pci_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { struct pci_controller *hose = bus->sysdata; u32 value, mask; if (ppc_md.pci_exclude_device) if (ppc_md.pci_exclude_device(bus->number, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; out_be32(hose->cfg_addr, (1 << 31) | 
((bus->number - hose->bus_offset) << 16) | (devfn << 8) | (offset & 0xfc)); mb(); #ifdef MPC5200_BUG_435_WORKAROUND if (bus->number != hose->bus_offset) { switch (len) { case 1: out_8(((u8 __iomem *)hose->cfg_data) + (offset & 3), val); break; case 2: out_le16(((u16 __iomem *)hose->cfg_data) + ((offset>>1) & 1), val); break; default: out_le16((u16 __iomem *)hose->cfg_data, (u16)val); out_le16(((u16 __iomem *)hose->cfg_data) + 1, (u16)(val>>16)); break; } } else #endif { if (len != 4) { value = in_le32(hose->cfg_data); offset = (offset & 0x3) << 3; mask = (0xffffffff >> (32 - (len << 3))); mask <<= offset; value &= ~mask; val = value | ((val << offset) & mask); } out_le32(hose->cfg_data, val); } mb(); out_be32(hose->cfg_addr, 0); mb(); return PCIBIOS_SUCCESSFUL; } static struct pci_ops mpc52xx_pci_ops = { .read = mpc52xx_pci_read_config, .write = mpc52xx_pci_write_config }; static void __init mpc52xx_pci_setup(struct mpc52xx_pci __iomem *pci_regs) { u32 tmp; /* Setup control regs */ tmp = in_be32(&pci_regs->scr); tmp |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; out_be32(&pci_regs->scr, tmp); /* Setup windows */ out_be32(&pci_regs->iw0btar, MPC52xx_PCI_IWBTAR_TRANSLATION( MPC52xx_PCI_MEM_START + MPC52xx_PCI_MEM_OFFSET, MPC52xx_PCI_MEM_START, MPC52xx_PCI_MEM_SIZE )); out_be32(&pci_regs->iw1btar, MPC52xx_PCI_IWBTAR_TRANSLATION( MPC52xx_PCI_MMIO_START + MPC52xx_PCI_MEM_OFFSET, MPC52xx_PCI_MMIO_START, MPC52xx_PCI_MMIO_SIZE )); out_be32(&pci_regs->iw2btar, MPC52xx_PCI_IWBTAR_TRANSLATION( MPC52xx_PCI_IO_BASE, MPC52xx_PCI_IO_START, MPC52xx_PCI_IO_SIZE )); out_be32(&pci_regs->iwcr, MPC52xx_PCI_IWCR_PACK( ( MPC52xx_PCI_IWCR_ENABLE | /* iw0btar */ MPC52xx_PCI_IWCR_READ_MULTI | MPC52xx_PCI_IWCR_MEM ), ( MPC52xx_PCI_IWCR_ENABLE | /* iw1btar */ MPC52xx_PCI_IWCR_READ | MPC52xx_PCI_IWCR_MEM ), ( MPC52xx_PCI_IWCR_ENABLE | /* iw2btar */ MPC52xx_PCI_IWCR_IO ) )); out_be32(&pci_regs->tbatr0, MPC52xx_PCI_TBATR_ENABLE | MPC52xx_PCI_TARGET_IO ); out_be32(&pci_regs->tbatr1, 
MPC52xx_PCI_TBATR_ENABLE | MPC52xx_PCI_TARGET_MEM ); out_be32(&pci_regs->tcr, MPC52xx_PCI_TCR_LD); /* Reset the exteral bus ( internal PCI controller is NOT resetted ) */ /* Not necessary and can be a bad thing if for example the bootloader is displaying a splash screen or ... Just left here for documentation purpose if anyone need it */ tmp = in_be32(&pci_regs->gscr); #if 0 out_be32(&pci_regs->gscr, tmp | MPC52xx_PCI_GSCR_PR); udelay(50); #endif out_be32(&pci_regs->gscr, tmp & ~MPC52xx_PCI_GSCR_PR); } static void mpc52xx_pci_fixup_resources(struct pci_dev *dev) { int i; /* We don't rely on boot loader for PCI and resets all devices */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { struct resource *res = &dev->resource[i]; if (res->end > res->start) { /* Only valid resources */ res->end -= res->start; res->start = 0; res->flags |= IORESOURCE_UNSET; } } /* The PCI Host bridge of MPC52xx has a prefetch memory resource fixed to 1Gb. Doesn't fit in the resource system so we remove it */ if ( (dev->vendor == PCI_VENDOR_ID_MOTOROLA) && ( dev->device == PCI_DEVICE_ID_MOTOROLA_MPC5200 || dev->device == PCI_DEVICE_ID_MOTOROLA_MPC5200B) ) { struct resource *res = &dev->resource[1]; res->start = res->end = res->flags = 0; } } void __init mpc52xx_find_bridges(void) { struct mpc52xx_pci __iomem *pci_regs; struct pci_controller *hose; pci_assign_all_buses = 1; pci_regs = ioremap(MPC52xx_PA(MPC52xx_PCI_OFFSET), MPC52xx_PCI_SIZE); if (!pci_regs) return; hose = pcibios_alloc_controller(); if (!hose) { iounmap(pci_regs); return; } ppc_md.pci_swizzle = common_swizzle; ppc_md.pcibios_fixup_resources = mpc52xx_pci_fixup_resources; hose->first_busno = 0; hose->last_busno = 0xff; hose->bus_offset = 0; hose->ops = &mpc52xx_pci_ops; mpc52xx_pci_setup(pci_regs); hose->pci_mem_offset = MPC52xx_PCI_MEM_OFFSET; hose->io_base_virt = ioremap(MPC52xx_PCI_IO_BASE, MPC52xx_PCI_IO_SIZE); isa_io_base = (unsigned long) hose->io_base_virt; hose->cfg_addr = &pci_regs->car; hose->cfg_data = 
hose->io_base_virt; /* Setup resources */ pci_init_resource(&hose->mem_resources[0], MPC52xx_PCI_MEM_START, MPC52xx_PCI_MEM_STOP, IORESOURCE_MEM|IORESOURCE_PREFETCH, "PCI prefetchable memory"); pci_init_resource(&hose->mem_resources[1], MPC52xx_PCI_MMIO_START, MPC52xx_PCI_MMIO_STOP, IORESOURCE_MEM, "PCI memory"); pci_init_resource(&hose->io_resource, MPC52xx_PCI_IO_START, MPC52xx_PCI_IO_STOP, IORESOURCE_IO, "PCI I/O"); }
gpl-2.0
jiangweifeng04/linux_kernel_2_6_32
linux-2.6.32.61/arch/x86/kernel/cpu/mcheck/therm_throt.c
93
9701
/* * Thermal throttle event support code (such as syslog messaging and rate * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c). * * This allows consistent reporting of CPU thermal throttle events. * * Maintains a counter in /sys that keeps track of the number of thermal * events, such that the user knows how bad the thermal problem might be * (since the logging to syslog and mcelog is rate limited). * * Author: Dmitriy Zavin (dmitriyz@google.com) * * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c. * Inspired by Ross Biro's and Al Borchers' counter code. */ #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/percpu.h> #include <linux/sysdev.h> #include <linux/types.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/cpu.h> #include <asm/processor.h> #include <asm/system.h> #include <asm/apic.h> #include <asm/idle.h> #include <asm/mce.h> #include <asm/msr.h> /* How long to wait between reporting thermal events */ #define CHECK_INTERVAL (300 * HZ) /* * Current thermal throttling state: */ struct thermal_state { bool is_throttled; u64 next_check; unsigned long throttle_count; unsigned long last_throttle_count; }; static DEFINE_PER_CPU(struct thermal_state, thermal_state); static atomic_t therm_throt_en = ATOMIC_INIT(0); static u32 lvtthmr_init __read_mostly; #ifdef CONFIG_SYSFS #define define_therm_throt_sysdev_one_ro(_name) \ static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) #define define_therm_throt_sysdev_show_func(name) \ \ static ssize_t therm_throt_sysdev_show_##name( \ struct sys_device *dev, \ struct sysdev_attribute *attr, \ char *buf) \ { \ unsigned int cpu = dev->id; \ ssize_t ret; \ \ preempt_disable(); /* CPU hotplug */ \ if (cpu_online(cpu)) \ ret = sprintf(buf, "%lu\n", \ per_cpu(thermal_state, cpu).name); \ else \ ret = 0; \ preempt_enable(); \ \ return ret; \ } 
define_therm_throt_sysdev_show_func(throttle_count); define_therm_throt_sysdev_one_ro(throttle_count); static struct attribute *thermal_throttle_attrs[] = { &attr_throttle_count.attr, NULL }; static struct attribute_group thermal_throttle_attr_group = { .attrs = thermal_throttle_attrs, .name = "thermal_throttle" }; #endif /* CONFIG_SYSFS */ /*** * therm_throt_process - Process thermal throttling event from interrupt * @curr: Whether the condition is current or not (boolean), since the * thermal interrupt normally gets called both when the thermal * event begins and once the event has ended. * * This function is called by the thermal interrupt after the * IRQ has been acknowledged. * * It will take care of rate limiting and printing messages to the syslog. * * Returns: 0 : Event should NOT be further logged, i.e. still in * "timeout" from previous log message. * 1 : Event should be logged further, and a message has been * printed to the syslog. */ static int therm_throt_process(bool is_throttled) { struct thermal_state *state; unsigned int this_cpu; bool was_throttled; u64 now; this_cpu = smp_processor_id(); now = get_jiffies_64(); state = &per_cpu(thermal_state, this_cpu); was_throttled = state->is_throttled; state->is_throttled = is_throttled; if (is_throttled) state->throttle_count++; if (time_before64(now, state->next_check) && state->throttle_count != state->last_throttle_count) return 0; state->next_check = now + CHECK_INTERVAL; state->last_throttle_count = state->throttle_count; /* if we just entered the thermal event */ if (is_throttled) { printk(KERN_CRIT "CPU%d: Temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, state->throttle_count); add_taint(TAINT_MACHINE_CHECK); return 1; } if (was_throttled) { printk(KERN_INFO "CPU%d: Temperature/speed normal\n", this_cpu); return 1; } return 0; } #ifdef CONFIG_SYSFS /* Add/Remove thermal_throttle interface for CPU device: */ static __cpuinit int thermal_throttle_add_dev(struct 
sys_device *sys_dev) { return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group); } static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) { sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); } /* Mutex protecting device creation against CPU hotplug: */ static DEFINE_MUTEX(therm_cpu_lock); /* Get notified when a cpu comes on/off. Be hotplug friendly. */ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct sys_device *sys_dev; int err = 0; sys_dev = get_cpu_sysdev(cpu); switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: mutex_lock(&therm_cpu_lock); err = thermal_throttle_add_dev(sys_dev); mutex_unlock(&therm_cpu_lock); WARN_ON(err); break; case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: mutex_lock(&therm_cpu_lock); thermal_throttle_remove_dev(sys_dev); mutex_unlock(&therm_cpu_lock); break; } return err ? 
NOTIFY_BAD : NOTIFY_OK; } static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = { .notifier_call = thermal_throttle_cpu_callback, }; static __init int thermal_throttle_init_device(void) { unsigned int cpu = 0; int err; if (!atomic_read(&therm_throt_en)) return 0; register_hotcpu_notifier(&thermal_throttle_cpu_notifier); #ifdef CONFIG_HOTPLUG_CPU mutex_lock(&therm_cpu_lock); #endif /* connect live CPUs to sysfs */ for_each_online_cpu(cpu) { err = thermal_throttle_add_dev(get_cpu_sysdev(cpu)); WARN_ON(err); } #ifdef CONFIG_HOTPLUG_CPU mutex_unlock(&therm_cpu_lock); #endif return 0; } device_initcall(thermal_throttle_init_device); #endif /* CONFIG_SYSFS */ /* Thermal transition interrupt handler */ static void intel_thermal_interrupt(void) { __u64 msr_val; rdmsrl(MSR_IA32_THERM_STATUS, msr_val); if (therm_throt_process((msr_val & THERM_STATUS_PROCHOT) != 0)) mce_log_therm_throt_event(msr_val); } static void unexpected_thermal_interrupt(void) { printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n", smp_processor_id()); add_taint(TAINT_MACHINE_CHECK); } static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) { exit_idle(); irq_enter(); inc_irq_stat(irq_thermal_count); smp_thermal_vector(); irq_exit(); /* Ack only at the end to avoid potential reentry */ ack_APIC_irq(); } void __init mcheck_intel_therm_init(void) { /* * This function is only called on boot CPU. 
Save the init thermal * LVT value on BSP and use that value to restore APs' thermal LVT * entry BIOS programmed later */ if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) && cpu_has(&boot_cpu_data, X86_FEATURE_ACC)) lvtthmr_init = apic_read(APIC_LVTTHMR); } void intel_init_thermal(struct cpuinfo_x86 *c) { unsigned int cpu = smp_processor_id(); int tm2 = 0; u32 l, h; /* Thermal monitoring depends on APIC, ACPI and clock modulation */ if (!cpu_has_apic || !cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC)) return; /* * First check if its enabled already, in which case there might * be some SMM goo which handles it, so we can't even put a handler * since it might be delivered via SMI already: */ rdmsr(MSR_IA32_MISC_ENABLE, l, h); h = lvtthmr_init; /* * The initial value of thermal LVT entries on all APs always reads * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI * sequence to them and LVT registers are reset to 0s except for * the mask bits which are set to 1s when APs receive INIT IPI. * If BIOS takes over the thermal interrupt and sets its interrupt * delivery mode to SMI (not fixed), it restores the value that the * BIOS has programmed on AP based on BSP's info we saved since BIOS * is always setting the same value for all threads/cores. 
*/ if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED) apic_write(APIC_LVTTHMR, lvtthmr_init); if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu); return; } /* Check whether a vector already exists */ if (h & APIC_VECTOR_MASK) { printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already installed\n", cpu, (h & APIC_VECTOR_MASK)); return; } /* early Pentium M models use different method for enabling TM2 */ if (cpu_has(c, X86_FEATURE_TM2)) { if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) { rdmsr(MSR_THERM2_CTL, l, h); if (l & MSR_THERM2_CTL_TM_SELECT) tm2 = 1; } else if (l & MSR_IA32_MISC_ENABLE_TM2) tm2 = 1; } /* We'll mask the thermal vector in the lapic till we're ready: */ h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED; apic_write(APIC_LVTTHMR, h); rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); wrmsr(MSR_IA32_THERM_INTERRUPT, l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h); smp_thermal_vector = intel_thermal_interrupt; rdmsr(MSR_IA32_MISC_ENABLE, l, h); wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); /* Unmask the thermal vector: */ l = apic_read(APIC_LVTTHMR); apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n", cpu, tm2 ? "TM2" : "TM1"); /* enable thermal throttle processing */ atomic_set(&therm_throt_en, 1); }
gpl-2.0
linusw/linux-bfq
drivers/acpi/acpica/exmisc.c
93
13280
/****************************************************************************** * * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes * *****************************************************************************/ /* * Copyright (C) 2000 - 2016, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #include "amlcode.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exmisc") /******************************************************************************* * * FUNCTION: acpi_ex_get_object_reference * * PARAMETERS: obj_desc - Create a reference to this object * return_desc - Where to store the reference * walk_state - Current state * * RETURN: Status * * DESCRIPTION: Obtain and return a "reference" to the target object * Common code for the ref_of_op and the cond_ref_of_op. 
 * ******************************************************************************/

/*
 * Build a new ACPI_TYPE_LOCAL_REFERENCE object (class RefOf) that points at
 * the object described by obj_desc.  Returns the new reference object via
 * *return_desc, or an error status (operand-type / memory errors).
 */
acpi_status
acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
			     union acpi_operand_object **return_desc,
			     struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *reference_obj;
	union acpi_operand_object *referenced_obj;

	ACPI_FUNCTION_TRACE_PTR(ex_get_object_reference, obj_desc);

	*return_desc = NULL;

	switch (ACPI_GET_DESCRIPTOR_TYPE(obj_desc)) {
	case ACPI_DESC_TYPE_OPERAND:

		if (obj_desc->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}

		/*
		 * Must be a reference to a Local or Arg
		 */
		switch (obj_desc->reference.class) {
		case ACPI_REFCLASS_LOCAL:
		case ACPI_REFCLASS_ARG:
		case ACPI_REFCLASS_DEBUG:

			/* The referenced object is the pseudo-node for the local/arg */

			referenced_obj = obj_desc->reference.object;
			break;

		default:

			ACPI_ERROR((AE_INFO, "Invalid Reference Class 0x%2.2X",
				    obj_desc->reference.class));
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}
		break;

	case ACPI_DESC_TYPE_NAMED:
		/*
		 * A named reference that has already been resolved to a Node
		 */
		referenced_obj = obj_desc;
		break;

	default:

		ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X",
			    ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
		return_ACPI_STATUS(AE_TYPE);
	}

	/* Create a new reference object */

	reference_obj =
	    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_REFERENCE);
	if (!reference_obj) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	reference_obj->reference.class = ACPI_REFCLASS_REFOF;
	reference_obj->reference.object = referenced_obj;
	*return_desc = reference_obj;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Object %p Type [%s], returning Reference %p\n",
			  obj_desc, acpi_ut_get_object_type_name(obj_desc),
			  *return_desc));

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_do_math_op
 *
 * PARAMETERS:  opcode              - AML opcode
 *              integer0            - Integer operand #0
 *              integer1            - Integer operand #1
 *
 * RETURN:      Integer result of the operation
 *
 * DESCRIPTION: Execute a math AML opcode. The purpose of having all of the
 *              math functions here is to prevent a lot of pointer dereferencing
 *              to obtain the operands.
 *
 ******************************************************************************/

u64 acpi_ex_do_math_op(u16 opcode, u64 integer0, u64 integer1)
{

	ACPI_FUNCTION_ENTRY();

	switch (opcode) {
	case AML_ADD_OP:	/* Add (Integer0, Integer1, Result) */

		return (integer0 + integer1);

	case AML_BIT_AND_OP:	/* And (Integer0, Integer1, Result) */

		return (integer0 & integer1);

	case AML_BIT_NAND_OP:	/* NAnd (Integer0, Integer1, Result) */

		return (~(integer0 & integer1));

	case AML_BIT_OR_OP:	/* Or (Integer0, Integer1, Result) */

		return (integer0 | integer1);

	case AML_BIT_NOR_OP:	/* NOr (Integer0, Integer1, Result) */

		return (~(integer0 | integer1));

	case AML_BIT_XOR_OP:	/* XOr (Integer0, Integer1, Result) */

		return (integer0 ^ integer1);

	case AML_MULTIPLY_OP:	/* Multiply (Integer0, Integer1, Result) */

		return (integer0 * integer1);

	case AML_SHIFT_LEFT_OP:	/* shift_left (Operand, shift_count, Result) */

		/*
		 * We need to check if the shiftcount is larger than the integer bit
		 * width since the behavior of this is not well-defined in the C language.
		 * (A shift by >= the operand width is undefined behavior in C, so
		 * the AML semantics of "result is zero" must be implemented by hand.)
		 */
		if (integer1 >= acpi_gbl_integer_bit_width) {
			return (0);
		}
		return (integer0 << integer1);

	case AML_SHIFT_RIGHT_OP:	/* shift_right (Operand, shift_count, Result) */

		/*
		 * We need to check if the shiftcount is larger than the integer bit
		 * width since the behavior of this is not well-defined in the C language.
		 */
		if (integer1 >= acpi_gbl_integer_bit_width) {
			return (0);
		}
		return (integer0 >> integer1);

	case AML_SUBTRACT_OP:	/* Subtract (Integer0, Integer1, Result) */

		return (integer0 - integer1);

	default:

		/* Unknown math opcode: callers get a zero result */

		return (0);
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_do_logical_numeric_op
 *
 * PARAMETERS:  opcode              - AML opcode
 *              integer0            - Integer operand #0
 *              integer1            - Integer operand #1
 *              logical_result      - TRUE/FALSE result of the operation
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Execute a logical "Numeric" AML opcode. For these Numeric
 *              operators (LAnd and LOr), both operands must be integers.
 *
 *              Note: cleanest machine code seems to be produced by the code
 *              below, rather than using statements of the form:
 *                  Result = (Integer0 && Integer1);
 *
 ******************************************************************************/

acpi_status
acpi_ex_do_logical_numeric_op(u16 opcode,
			      u64 integer0, u64 integer1, u8 *logical_result)
{
	acpi_status status = AE_OK;
	u8 local_result = FALSE;

	ACPI_FUNCTION_TRACE(ex_do_logical_numeric_op);

	switch (opcode) {
	case AML_LAND_OP:	/* LAnd (Integer0, Integer1) */

		if (integer0 && integer1) {
			local_result = TRUE;
		}
		break;

	case AML_LOR_OP:	/* LOr (Integer0, Integer1) */

		if (integer0 || integer1) {
			local_result = TRUE;
		}
		break;

	default:

		status = AE_AML_INTERNAL;
		break;
	}

	/* Return the logical result and status */

	*logical_result = local_result;
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_do_logical_op
 *
 * PARAMETERS:  opcode              - AML opcode
 *              operand0            - operand #0
 *              operand1            - operand #1
 *              logical_result      - TRUE/FALSE result of the operation
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Execute a logical AML opcode. The purpose of having all of the
 *              functions here is to prevent a lot of pointer dereferencing
 *              to obtain the operands and to simplify the generation of the
 *              logical value. For the Numeric operators (LAnd and LOr), both
 *              operands must be integers. For the other logical operators,
 *              operands can be any combination of Integer/String/Buffer. The
 *              first operand determines the type to which the second operand
 *              will be converted.
 *
 *              Note: cleanest machine code seems to be produced by the code
 *              below, rather than using statements of the form:
 *                  Result = (Operand0 == Operand1);
 *
 ******************************************************************************/

acpi_status
acpi_ex_do_logical_op(u16 opcode,
		      union acpi_operand_object *operand0,
		      union acpi_operand_object *operand1, u8 * logical_result)
{
	union acpi_operand_object *local_operand1 = operand1;
	u64 integer0;
	u64 integer1;
	u32 length0;
	u32 length1;
	acpi_status status = AE_OK;
	u8 local_result = FALSE;
	int compare;

	ACPI_FUNCTION_TRACE(ex_do_logical_op);

	/*
	 * Convert the second operand if necessary. The first operand
	 * determines the type of the second operand, (See the Data Types
	 * section of the ACPI 3.0+ specification.)  Both object types are
	 * guaranteed to be either Integer/String/Buffer by the operand
	 * resolution mechanism.
	 */
	switch (operand0->common.type) {
	case ACPI_TYPE_INTEGER:

		/* NOTE(review): the literal 16 is the conversion flag ACPICA
		 * passes for implicit hex conversion — confirm against the
		 * acpi_ex_convert_to_integer() prototype in this tree. */
		status = acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
		break;

	case ACPI_TYPE_STRING:

		status = acpi_ex_convert_to_string(operand1, &local_operand1,
						   ACPI_IMPLICIT_CONVERT_HEX);
		break;

	case ACPI_TYPE_BUFFER:

		status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
		break;

	default:

		status = AE_AML_INTERNAL;
		break;
	}

	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Two cases: 1) Both Integers, 2) Both Strings or Buffers
	 */
	if (operand0->common.type == ACPI_TYPE_INTEGER) {
		/*
		 * 1) Both operands are of type integer
		 *    Note: local_operand1 may have changed above
		 */
		integer0 = operand0->integer.value;
		integer1 = local_operand1->integer.value;

		switch (opcode) {
		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */

			if (integer0 == integer1) {
				local_result = TRUE;
			}
			break;

		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */

			if (integer0 > integer1) {
				local_result = TRUE;
			}
			break;

		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */

			if (integer0 < integer1) {
				local_result = TRUE;
			}
			break;

		default:

			status = AE_AML_INTERNAL;
			break;
		}
	} else {
		/*
		 * 2) Both operands are Strings or both are Buffers
		 *    Note: Code below takes advantage of common Buffer/String
		 *    object fields. local_operand1 may have changed above. Use
		 *    memcmp to handle nulls in buffers.
		 */
		length0 = operand0->buffer.length;
		length1 = local_operand1->buffer.length;

		/* Lexicographic compare: compare the data bytes */

		compare = memcmp(operand0->buffer.pointer,
				 local_operand1->buffer.pointer,
				 (length0 > length1) ? length1 : length0);

		switch (opcode) {
		case AML_LEQUAL_OP:	/* LEqual (Operand0, Operand1) */

			/* Length and all bytes must be equal */

			if ((length0 == length1) && (compare == 0)) {

				/* Length and all bytes match ==> TRUE */

				local_result = TRUE;
			}
			break;

		case AML_LGREATER_OP:	/* LGreater (Operand0, Operand1) */

			if (compare > 0) {
				local_result = TRUE;
				goto cleanup;	/* TRUE */
			}
			if (compare < 0) {
				goto cleanup;	/* FALSE */
			}

			/* Bytes match (to shortest length), compare lengths */

			if (length0 > length1) {
				local_result = TRUE;
			}
			break;

		case AML_LLESS_OP:	/* LLess (Operand0, Operand1) */

			if (compare > 0) {
				goto cleanup;	/* FALSE */
			}
			if (compare < 0) {
				local_result = TRUE;
				goto cleanup;	/* TRUE */
			}

			/* Bytes match (to shortest length), compare lengths */

			if (length0 < length1) {
				local_result = TRUE;
			}
			break;

		default:

			status = AE_AML_INTERNAL;
			break;
		}
	}

cleanup:

	/* New object was created if implicit conversion performed - delete */

	if (local_operand1 != operand1) {
		acpi_ut_remove_reference(local_operand1);
	}

	/* Return the logical result and status */

	*logical_result = local_result;
	return_ACPI_STATUS(status);
}
gpl-2.0
petkan/linux
drivers/net/wireless/ath/ath5k/debug.c
93
33677
/* * Copyright (c) 2007-2008 Bruno Randolf <bruno@thinktube.com> * * This file is free software: you may copy, redistribute and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation, either version 2 of the License, or (at your * option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * * This file incorporates work covered by the following copyright and * permission notice: * * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * Copyright (c) 2004-2005 Atheros Communications, Inc. * Copyright (c) 2006 Devicescape Software, Inc. * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/export.h> #include <linux/moduleparam.h> #include <linux/vmalloc.h> #include <linux/seq_file.h> #include <linux/list.h> #include <linux/vmalloc.h> #include "debug.h" #include "ath5k.h" #include "reg.h" #include "base.h" static unsigned int ath5k_debug; module_param_named(debug, ath5k_debug, uint, 0); /* debugfs: registers */ struct reg { const char *name; int addr; }; #define REG_STRUCT_INIT(r) { #r, r } /* just a few random registers, might want to add more */ static const struct reg regs[] = { REG_STRUCT_INIT(AR5K_CR), REG_STRUCT_INIT(AR5K_RXDP), REG_STRUCT_INIT(AR5K_CFG), REG_STRUCT_INIT(AR5K_IER), REG_STRUCT_INIT(AR5K_BCR), REG_STRUCT_INIT(AR5K_RTSD0), REG_STRUCT_INIT(AR5K_RTSD1), REG_STRUCT_INIT(AR5K_TXCFG), REG_STRUCT_INIT(AR5K_RXCFG), REG_STRUCT_INIT(AR5K_RXJLA), REG_STRUCT_INIT(AR5K_MIBC), REG_STRUCT_INIT(AR5K_TOPS), REG_STRUCT_INIT(AR5K_RXNOFRM), REG_STRUCT_INIT(AR5K_TXNOFRM), REG_STRUCT_INIT(AR5K_RPGTO), REG_STRUCT_INIT(AR5K_RFCNT), REG_STRUCT_INIT(AR5K_MISC), 
REG_STRUCT_INIT(AR5K_QCUDCU_CLKGT), REG_STRUCT_INIT(AR5K_ISR), REG_STRUCT_INIT(AR5K_PISR), REG_STRUCT_INIT(AR5K_SISR0), REG_STRUCT_INIT(AR5K_SISR1), REG_STRUCT_INIT(AR5K_SISR2), REG_STRUCT_INIT(AR5K_SISR3), REG_STRUCT_INIT(AR5K_SISR4), REG_STRUCT_INIT(AR5K_IMR), REG_STRUCT_INIT(AR5K_PIMR), REG_STRUCT_INIT(AR5K_SIMR0), REG_STRUCT_INIT(AR5K_SIMR1), REG_STRUCT_INIT(AR5K_SIMR2), REG_STRUCT_INIT(AR5K_SIMR3), REG_STRUCT_INIT(AR5K_SIMR4), REG_STRUCT_INIT(AR5K_DCM_ADDR), REG_STRUCT_INIT(AR5K_DCCFG), REG_STRUCT_INIT(AR5K_CCFG), REG_STRUCT_INIT(AR5K_CPC0), REG_STRUCT_INIT(AR5K_CPC1), REG_STRUCT_INIT(AR5K_CPC2), REG_STRUCT_INIT(AR5K_CPC3), REG_STRUCT_INIT(AR5K_CPCOVF), REG_STRUCT_INIT(AR5K_RESET_CTL), REG_STRUCT_INIT(AR5K_SLEEP_CTL), REG_STRUCT_INIT(AR5K_INTPEND), REG_STRUCT_INIT(AR5K_SFR), REG_STRUCT_INIT(AR5K_PCICFG), REG_STRUCT_INIT(AR5K_GPIOCR), REG_STRUCT_INIT(AR5K_GPIODO), REG_STRUCT_INIT(AR5K_SREV), }; static void *reg_start(struct seq_file *seq, loff_t *pos) { return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL; } static void reg_stop(struct seq_file *seq, void *p) { /* nothing to do */ } static void *reg_next(struct seq_file *seq, void *p, loff_t *pos) { ++*pos; return *pos < ARRAY_SIZE(regs) ? 
(void *)&regs[*pos] : NULL; } static int reg_show(struct seq_file *seq, void *p) { struct ath5k_hw *ah = seq->private; struct reg *r = p; seq_printf(seq, "%-25s0x%08x\n", r->name, ath5k_hw_reg_read(ah, r->addr)); return 0; } static const struct seq_operations register_seq_ops = { .start = reg_start, .next = reg_next, .stop = reg_stop, .show = reg_show }; static int open_file_registers(struct inode *inode, struct file *file) { struct seq_file *s; int res; res = seq_open(file, &register_seq_ops); if (res == 0) { s = file->private_data; s->private = inode->i_private; } return res; } static const struct file_operations fops_registers = { .open = open_file_registers, .read = seq_read, .llseek = seq_lseek, .release = seq_release, .owner = THIS_MODULE, }; /* debugfs: beacons */ static ssize_t read_file_beacon(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[500]; unsigned int len = 0; unsigned int v; u64 tsf; v = ath5k_hw_reg_read(ah, AR5K_BEACON); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n", "AR5K_BEACON", v, v & AR5K_BEACON_PERIOD, (v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n", "AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP)); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n", "AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT)); v = ath5k_hw_reg_read(ah, AR5K_TIMER0); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n", "AR5K_TIMER0 (TBTT)", v, v); v = ath5k_hw_reg_read(ah, AR5K_TIMER1); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n", "AR5K_TIMER1 (DMA)", v, v >> 3); v = ath5k_hw_reg_read(ah, AR5K_TIMER2); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n", "AR5K_TIMER2 (SWBA)", v, v >> 3); v = ath5k_hw_reg_read(ah, AR5K_TIMER3); len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n", 
"AR5K_TIMER3 (ATIM)", v, v); tsf = ath5k_hw_get_tsf64(ah); len += snprintf(buf + len, sizeof(buf) - len, "TSF\t\t0x%016llx\tTU: %08x\n", (unsigned long long)tsf, TSF_TO_TU(tsf)); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_beacon(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[20]; count = min_t(size_t, count, sizeof(buf) - 1); if (copy_from_user(buf, userbuf, count)) return -EFAULT; buf[count] = '\0'; if (strncmp(buf, "disable", 7) == 0) { AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); pr_info("debugfs disable beacons\n"); } else if (strncmp(buf, "enable", 6) == 0) { AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); pr_info("debugfs enable beacons\n"); } return count; } static const struct file_operations fops_beacon = { .read = read_file_beacon, .write = write_file_beacon, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: reset */ static ssize_t write_file_reset(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n"); ieee80211_queue_work(ah->hw, &ah->reset_work); return count; } static const struct file_operations fops_reset = { .write = write_file_reset, .open = simple_open, .owner = THIS_MODULE, .llseek = noop_llseek, }; /* debugfs: debug level */ static const struct { enum ath5k_debug_level level; const char *name; const char *desc; } dbg_info[] = { { ATH5K_DEBUG_RESET, "reset", "reset and initialization" }, { ATH5K_DEBUG_INTR, "intr", "interrupt handling" }, { ATH5K_DEBUG_MODE, "mode", "mode init/setup" }, { ATH5K_DEBUG_XMIT, "xmit", "basic xmit operation" }, { ATH5K_DEBUG_BEACON, "beacon", "beacon handling" }, { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" }, { ATH5K_DEBUG_TXPOWER, "txpower", 
"transmit power setting" }, { ATH5K_DEBUG_LED, "led", "LED management" }, { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, { ATH5K_DEBUG_DMA, "dma", "dma start/stop" }, { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" }, { ATH5K_DEBUG_DESC, "desc", "descriptor chains" }, { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, }; static ssize_t read_file_debug(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[700]; unsigned int len = 0; unsigned int i; len += snprintf(buf + len, sizeof(buf) - len, "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level); for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) { len += snprintf(buf + len, sizeof(buf) - len, "%10s %c 0x%08x - %s\n", dbg_info[i].name, ah->debug.level & dbg_info[i].level ? '+' : ' ', dbg_info[i].level, dbg_info[i].desc); } len += snprintf(buf + len, sizeof(buf) - len, "%10s %c 0x%08x - %s\n", dbg_info[i].name, ah->debug.level == dbg_info[i].level ? '+' : ' ', dbg_info[i].level, dbg_info[i].desc); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_debug(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; unsigned int i; char buf[20]; count = min_t(size_t, count, sizeof(buf) - 1); if (copy_from_user(buf, userbuf, count)) return -EFAULT; buf[count] = '\0'; for (i = 0; i < ARRAY_SIZE(dbg_info); i++) { if (strncmp(buf, dbg_info[i].name, strlen(dbg_info[i].name)) == 0) { ah->debug.level ^= dbg_info[i].level; /* toggle bit */ break; } } return count; } static const struct file_operations fops_debug = { .read = read_file_debug, .write = write_file_debug, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: antenna */ static ssize_t read_file_antenna(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[700]; 
unsigned int len = 0; unsigned int i; unsigned int v; len += snprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n", ah->ah_ant_mode); len += snprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n", ah->ah_def_ant); len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n", ah->ah_tx_ant); len += snprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n"); for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) { len += snprintf(buf + len, sizeof(buf) - len, "[antenna %d]\t%d\t%d\n", i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]); } len += snprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n", ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]); v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA); len += snprintf(buf + len, sizeof(buf) - len, "\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v); v = ath5k_hw_reg_read(ah, AR5K_STA_ID1); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n", (v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_STA_ID1_DESC_ANTENNA\t%d\n", (v & AR5K_STA_ID1_DESC_ANTENNA) != 0); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n", (v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n", (v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0); v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL); len += snprintf(buf + len, sizeof(buf) - len, "\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n", (v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0); v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHY_RESTART_DIV_GC\t\t%x\n", (v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S); v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n", (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0); v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0); len += snprintf(buf + len, sizeof(buf) - len, 
"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v); v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_antenna(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; unsigned int i; char buf[20]; count = min_t(size_t, count, sizeof(buf) - 1); if (copy_from_user(buf, userbuf, count)) return -EFAULT; buf[count] = '\0'; if (strncmp(buf, "diversity", 9) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); pr_info("debug: enable diversity\n"); } else if (strncmp(buf, "fixed-a", 7) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A); pr_info("debug: fixed antenna A\n"); } else if (strncmp(buf, "fixed-b", 7) == 0) { ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B); pr_info("debug: fixed antenna B\n"); } else if (strncmp(buf, "clear", 5) == 0) { for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) { ah->stats.antenna_rx[i] = 0; ah->stats.antenna_tx[i] = 0; } pr_info("debug: cleared antenna stats\n"); } return count; } static const struct file_operations fops_antenna = { .read = read_file_antenna, .write = write_file_antenna, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: misc */ static ssize_t read_file_misc(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[700]; unsigned int len = 0; u32 filt = ath5k_hw_get_rx_filter(ah); len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n", ah->bssidmask); len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ", filt); if (filt & AR5K_RX_FILTER_UCAST) len += snprintf(buf + len, sizeof(buf) - len, " UCAST"); if (filt & AR5K_RX_FILTER_MCAST) len += snprintf(buf + len, sizeof(buf) - len, " MCAST"); if 
(filt & AR5K_RX_FILTER_BCAST) len += snprintf(buf + len, sizeof(buf) - len, " BCAST"); if (filt & AR5K_RX_FILTER_CONTROL) len += snprintf(buf + len, sizeof(buf) - len, " CONTROL"); if (filt & AR5K_RX_FILTER_BEACON) len += snprintf(buf + len, sizeof(buf) - len, " BEACON"); if (filt & AR5K_RX_FILTER_PROM) len += snprintf(buf + len, sizeof(buf) - len, " PROM"); if (filt & AR5K_RX_FILTER_XRPOLL) len += snprintf(buf + len, sizeof(buf) - len, " XRPOLL"); if (filt & AR5K_RX_FILTER_PROBEREQ) len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ"); if (filt & AR5K_RX_FILTER_PHYERR_5212) len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5212"); if (filt & AR5K_RX_FILTER_RADARERR_5212) len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5212"); if (filt & AR5K_RX_FILTER_PHYERR_5211) snprintf(buf + len, sizeof(buf) - len, " PHYERR-5211"); if (filt & AR5K_RX_FILTER_RADARERR_5211) len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211"); len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n", ath_opmode_to_string(ah->opmode), ah->opmode); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_misc = { .read = read_file_misc, .open = simple_open, .owner = THIS_MODULE, }; /* debugfs: frameerrors */ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; struct ath5k_statistics *st = &ah->stats; char buf[700]; unsigned int len = 0; int i; len += snprintf(buf + len, sizeof(buf) - len, "RX\n---------------------\n"); len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n", st->rxerr_crc, st->rx_all_count > 0 ? st->rxerr_crc * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n", st->rxerr_phy, st->rx_all_count > 0 ? 
st->rxerr_phy * 100 / st->rx_all_count : 0); for (i = 0; i < 32; i++) { if (st->rxerr_phy_code[i]) len += snprintf(buf + len, sizeof(buf) - len, " phy_err[%u]\t%u\n", i, st->rxerr_phy_code[i]); } len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n", st->rxerr_fifo, st->rx_all_count > 0 ? st->rxerr_fifo * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n", st->rxerr_decrypt, st->rx_all_count > 0 ? st->rxerr_decrypt * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n", st->rxerr_mic, st->rx_all_count > 0 ? st->rxerr_mic * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n", st->rxerr_proc, st->rx_all_count > 0 ? st->rxerr_proc * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n", st->rxerr_jumbo, st->rx_all_count > 0 ? st->rxerr_jumbo * 100 / st->rx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n", st->rx_all_count); len += snprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n", st->rx_bytes_count); len += snprintf(buf + len, sizeof(buf) - len, "\nTX\n---------------------\n"); len += snprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n", st->txerr_retry, st->tx_all_count > 0 ? st->txerr_retry * 100 / st->tx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n", st->txerr_fifo, st->tx_all_count > 0 ? st->txerr_fifo * 100 / st->tx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n", st->txerr_filt, st->tx_all_count > 0 ? 
st->txerr_filt * 100 / st->tx_all_count : 0); len += snprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n", st->tx_all_count); len += snprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n", st->tx_bytes_count); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_frameerrors(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; struct ath5k_statistics *st = &ah->stats; char buf[20]; count = min_t(size_t, count, sizeof(buf) - 1); if (copy_from_user(buf, userbuf, count)) return -EFAULT; buf[count] = '\0'; if (strncmp(buf, "clear", 5) == 0) { st->rxerr_crc = 0; st->rxerr_phy = 0; st->rxerr_fifo = 0; st->rxerr_decrypt = 0; st->rxerr_mic = 0; st->rxerr_proc = 0; st->rxerr_jumbo = 0; st->rx_all_count = 0; st->txerr_retry = 0; st->txerr_fifo = 0; st->txerr_filt = 0; st->tx_all_count = 0; pr_info("debug: cleared frameerrors stats\n"); } return count; } static const struct file_operations fops_frameerrors = { .read = read_file_frameerrors, .write = write_file_frameerrors, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: ani */ static ssize_t read_file_ani(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; struct ath5k_statistics *st = &ah->stats; struct ath5k_ani_state *as = &ah->ani_state; char buf[700]; unsigned int len = 0; len += snprintf(buf + len, sizeof(buf) - len, "HW has PHY error counters:\t%s\n", ah->ah_capabilities.cap_has_phyerr_counters ? 
"yes" : "no"); len += snprintf(buf + len, sizeof(buf) - len, "HW max spur immunity level:\t%d\n", as->max_spur_level); len += snprintf(buf + len, sizeof(buf) - len, "\nANI state\n--------------------------------------------\n"); len += snprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t"); switch (as->ani_mode) { case ATH5K_ANI_MODE_OFF: len += snprintf(buf + len, sizeof(buf) - len, "OFF\n"); break; case ATH5K_ANI_MODE_MANUAL_LOW: len += snprintf(buf + len, sizeof(buf) - len, "MANUAL LOW\n"); break; case ATH5K_ANI_MODE_MANUAL_HIGH: len += snprintf(buf + len, sizeof(buf) - len, "MANUAL HIGH\n"); break; case ATH5K_ANI_MODE_AUTO: len += snprintf(buf + len, sizeof(buf) - len, "AUTO\n"); break; default: len += snprintf(buf + len, sizeof(buf) - len, "??? (not good)\n"); break; } len += snprintf(buf + len, sizeof(buf) - len, "noise immunity level:\t\t%d\n", as->noise_imm_level); len += snprintf(buf + len, sizeof(buf) - len, "spur immunity level:\t\t%d\n", as->spur_level); len += snprintf(buf + len, sizeof(buf) - len, "firstep level:\t\t\t%d\n", as->firstep_level); len += snprintf(buf + len, sizeof(buf) - len, "OFDM weak signal detection:\t%s\n", as->ofdm_weak_sig ? "on" : "off"); len += snprintf(buf + len, sizeof(buf) - len, "CCK weak signal detection:\t%s\n", as->cck_weak_sig ? "on" : "off"); len += snprintf(buf + len, sizeof(buf) - len, "\nMIB INTERRUPTS:\t\t%u\n", st->mib_intr); len += snprintf(buf + len, sizeof(buf) - len, "beacon RSSI average:\t%d\n", (int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg)); #define CC_PRINT(_struct, _field) \ _struct._field, \ _struct.cycles > 0 ? 
\ _struct._field * 100 / _struct.cycles : 0 len += snprintf(buf + len, sizeof(buf) - len, "profcnt tx\t\t%u\t(%d%%)\n", CC_PRINT(as->last_cc, tx_frame)); len += snprintf(buf + len, sizeof(buf) - len, "profcnt rx\t\t%u\t(%d%%)\n", CC_PRINT(as->last_cc, rx_frame)); len += snprintf(buf + len, sizeof(buf) - len, "profcnt busy\t\t%u\t(%d%%)\n", CC_PRINT(as->last_cc, rx_busy)); #undef CC_PRINT len += snprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n", as->last_cc.cycles); len += snprintf(buf + len, sizeof(buf) - len, "listen time\t\t%d\tlast: %d\n", as->listen_time, as->last_listen); len += snprintf(buf + len, sizeof(buf) - len, "OFDM errors\t\t%u\tlast: %u\tsum: %u\n", as->ofdm_errors, as->last_ofdm_errors, as->sum_ofdm_errors); len += snprintf(buf + len, sizeof(buf) - len, "CCK errors\t\t%u\tlast: %u\tsum: %u\n", as->cck_errors, as->last_cck_errors, as->sum_cck_errors); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHYERR_CNT1\t%x\t(=%d)\n", ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1), ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1))); len += snprintf(buf + len, sizeof(buf) - len, "AR5K_PHYERR_CNT2\t%x\t(=%d)\n", ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2), ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2))); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_ani(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[20]; count = min_t(size_t, count, sizeof(buf) - 1); if (copy_from_user(buf, userbuf, count)) return -EFAULT; buf[count] = '\0'; if (strncmp(buf, "sens-low", 8) == 0) { ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH); } else if (strncmp(buf, "sens-high", 9) == 0) { ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_LOW); } else if (strncmp(buf, "ani-off", 7) == 0) { ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF); } else if 
(strncmp(buf, "ani-on", 6) == 0) { ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO); } else if (strncmp(buf, "noise-low", 9) == 0) { ath5k_ani_set_noise_immunity_level(ah, 0); } else if (strncmp(buf, "noise-high", 10) == 0) { ath5k_ani_set_noise_immunity_level(ah, ATH5K_ANI_MAX_NOISE_IMM_LVL); } else if (strncmp(buf, "spur-low", 8) == 0) { ath5k_ani_set_spur_immunity_level(ah, 0); } else if (strncmp(buf, "spur-high", 9) == 0) { ath5k_ani_set_spur_immunity_level(ah, ah->ani_state.max_spur_level); } else if (strncmp(buf, "fir-low", 7) == 0) { ath5k_ani_set_firstep_level(ah, 0); } else if (strncmp(buf, "fir-high", 8) == 0) { ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL); } else if (strncmp(buf, "ofdm-off", 8) == 0) { ath5k_ani_set_ofdm_weak_signal_detection(ah, false); } else if (strncmp(buf, "ofdm-on", 7) == 0) { ath5k_ani_set_ofdm_weak_signal_detection(ah, true); } else if (strncmp(buf, "cck-off", 7) == 0) { ath5k_ani_set_cck_weak_signal_detection(ah, false); } else if (strncmp(buf, "cck-on", 6) == 0) { ath5k_ani_set_cck_weak_signal_detection(ah, true); } return count; } static const struct file_operations fops_ani = { .read = read_file_ani, .write = write_file_ani, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: queues etc */ static ssize_t read_file_queue(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[700]; unsigned int len = 0; struct ath5k_txq *txq; struct ath5k_buf *bf, *bf0; int i, n; len += snprintf(buf + len, sizeof(buf) - len, "available txbuffers: %d\n", ah->txbuf_len); for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { txq = &ah->txqs[i]; len += snprintf(buf + len, sizeof(buf) - len, "%02d: %ssetup\n", i, txq->setup ? 
"" : "not "); if (!txq->setup) continue; n = 0; spin_lock_bh(&txq->lock); list_for_each_entry_safe(bf, bf0, &txq->q, list) n++; spin_unlock_bh(&txq->lock); len += snprintf(buf + len, sizeof(buf) - len, " len: %d bufs: %d\n", txq->txq_len, n); len += snprintf(buf + len, sizeof(buf) - len, " stuck: %d\n", txq->txq_stuck); } if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t write_file_queue(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct ath5k_hw *ah = file->private_data; char buf[20]; count = min_t(size_t, count, sizeof(buf) - 1); if (copy_from_user(buf, userbuf, count)) return -EFAULT; buf[count] = '\0'; if (strncmp(buf, "start", 5) == 0) ieee80211_wake_queues(ah->hw); else if (strncmp(buf, "stop", 4) == 0) ieee80211_stop_queues(ah->hw); return count; } static const struct file_operations fops_queue = { .read = read_file_queue, .write = write_file_queue, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* debugfs: eeprom */ struct eeprom_private { u16 *buf; int len; }; static int open_file_eeprom(struct inode *inode, struct file *file) { struct eeprom_private *ep; struct ath5k_hw *ah = inode->i_private; bool res; int i, ret; u32 eesize; u16 val, *buf; /* Get eeprom size */ res = ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_UPPER, &val); if (!res) return -EACCES; if (val == 0) { eesize = AR5K_EEPROM_INFO_MAX + AR5K_EEPROM_INFO_BASE; } else { eesize = (val & AR5K_EEPROM_SIZE_UPPER_MASK) << AR5K_EEPROM_SIZE_ENDLOC_SHIFT; ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_LOWER, &val); eesize = eesize | val; } if (eesize > 4096) return -EINVAL; /* Create buffer and read in eeprom */ buf = vmalloc(eesize); if (!buf) { ret = -ENOMEM; goto err; } for (i = 0; i < eesize; ++i) { AR5K_EEPROM_READ(i, val); buf[i] = val; } /* Create private struct and assign to file */ ep = kmalloc(sizeof(*ep), GFP_KERNEL); if (!ep) { ret = -ENOMEM; goto freebuf; } ep->buf = 
buf; ep->len = i; file->private_data = (void *)ep; return 0; freebuf: vfree(buf); err: return ret; } static ssize_t read_file_eeprom(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct eeprom_private *ep = file->private_data; return simple_read_from_buffer(user_buf, count, ppos, ep->buf, ep->len); } static int release_file_eeprom(struct inode *inode, struct file *file) { struct eeprom_private *ep = file->private_data; vfree(ep->buf); kfree(ep); return 0; } static const struct file_operations fops_eeprom = { .open = open_file_eeprom, .read = read_file_eeprom, .release = release_file_eeprom, .owner = THIS_MODULE, }; void ath5k_debug_init_device(struct ath5k_hw *ah) { struct dentry *phydir; ah->debug.level = ath5k_debug; phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir); if (!phydir) return; debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, ah, &fops_debug); debugfs_create_file("registers", S_IRUSR, phydir, ah, &fops_registers); debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, ah, &fops_beacon); debugfs_create_file("reset", S_IWUSR, phydir, ah, &fops_reset); debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, ah, &fops_antenna); debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc); debugfs_create_file("eeprom", S_IRUSR, phydir, ah, &fops_eeprom); debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah, &fops_frameerrors); debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, ah, &fops_ani); debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, ah, &fops_queue); debugfs_create_bool("32khz_clock", S_IWUSR | S_IRUSR, phydir, &ah->ah_use_32khz_clock); } /* functions used in other places */ void ath5k_debug_dump_bands(struct ath5k_hw *ah) { unsigned int b, i; if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS))) return; BUG_ON(!ah->sbands); for (b = 0; b < NUM_NL80211_BANDS; b++) { struct ieee80211_supported_band *band = &ah->sbands[b]; char bname[6]; switch (band->band) { case 
NL80211_BAND_2GHZ: strcpy(bname, "2 GHz"); break; case NL80211_BAND_5GHZ: strcpy(bname, "5 GHz"); break; default: printk(KERN_DEBUG "Band not supported: %d\n", band->band); return; } printk(KERN_DEBUG "Band %s: channels %d, rates %d\n", bname, band->n_channels, band->n_bitrates); printk(KERN_DEBUG " channels:\n"); for (i = 0; i < band->n_channels; i++) printk(KERN_DEBUG " %3d %d %.4x %.4x\n", ieee80211_frequency_to_channel( band->channels[i].center_freq), band->channels[i].center_freq, band->channels[i].hw_value, band->channels[i].flags); printk(KERN_DEBUG " rates:\n"); for (i = 0; i < band->n_bitrates; i++) printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n", band->bitrates[i].bitrate, band->bitrates[i].hw_value, band->bitrates[i].flags, band->bitrates[i].hw_value_short); } } static inline void ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done, struct ath5k_rx_status *rs) { struct ath5k_desc *ds = bf->desc; struct ath5k_hw_all_rx_desc *rd = &ds->ud.ds_rx; printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link, ds->ds_data, rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1, rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1, !done ? ' ' : (rs->rs_status == 0) ? 
'*' : '!'); } void ath5k_debug_printrxbuffs(struct ath5k_hw *ah) { struct ath5k_desc *ds; struct ath5k_buf *bf; struct ath5k_rx_status rs = {}; int status; if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC))) return; printk(KERN_DEBUG "rxdp %x, rxlink %p\n", ath5k_hw_get_rxdp(ah), ah->rxlink); spin_lock_bh(&ah->rxbuflock); list_for_each_entry(bf, &ah->rxbuf, list) { ds = bf->desc; status = ah->ah_proc_rx_desc(ah, ds, &rs); if (!status) ath5k_debug_printrxbuf(bf, status == 0, &rs); } spin_unlock_bh(&ah->rxbuflock); } void ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf) { struct ath5k_desc *ds = bf->desc; struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212; struct ath5k_tx_status ts = {}; int done; if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC))) return; done = ah->ah_proc_tx_desc(ah, bf->desc, &ts); printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x " "%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link, ds->ds_data, td->tx_ctl.tx_control_0, td->tx_ctl.tx_control_1, td->tx_ctl.tx_control_2, td->tx_ctl.tx_control_3, td->tx_stat.tx_status_0, td->tx_stat.tx_status_1, done ? ' ' : (ts.ts_status == 0) ? '*' : '!'); }
gpl-2.0
embeddedarm/linux-3.10-ts4800
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
349
34115
/************************************************************************** * * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include <linux/module.h> #include <drm/drmP.h> #include "vmwgfx_drv.h" #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_object.h> #include <drm/ttm/ttm_module.h> #define VMWGFX_DRIVER_NAME "vmwgfx" #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" #define VMWGFX_CHIP_SVGAII 0 #define VMW_FB_RESERVATION 0 #define VMW_MIN_INITIAL_WIDTH 800 #define VMW_MIN_INITIAL_HEIGHT 600 /** * Fully encoded drm commands. 
Might move to vmw_drm.h */ #define DRM_IOCTL_VMW_GET_PARAM \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ struct drm_vmw_getparam_arg) #define DRM_IOCTL_VMW_ALLOC_DMABUF \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ union drm_vmw_alloc_dmabuf_arg) #define DRM_IOCTL_VMW_UNREF_DMABUF \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ struct drm_vmw_unref_dmabuf_arg) #define DRM_IOCTL_VMW_CURSOR_BYPASS \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ struct drm_vmw_cursor_bypass_arg) #define DRM_IOCTL_VMW_CONTROL_STREAM \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ struct drm_vmw_control_stream_arg) #define DRM_IOCTL_VMW_CLAIM_STREAM \ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ struct drm_vmw_stream_arg) #define DRM_IOCTL_VMW_UNREF_STREAM \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ struct drm_vmw_stream_arg) #define DRM_IOCTL_VMW_CREATE_CONTEXT \ DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ struct drm_vmw_context_arg) #define DRM_IOCTL_VMW_UNREF_CONTEXT \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ struct drm_vmw_context_arg) #define DRM_IOCTL_VMW_CREATE_SURFACE \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ union drm_vmw_surface_create_arg) #define DRM_IOCTL_VMW_UNREF_SURFACE \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ struct drm_vmw_surface_arg) #define DRM_IOCTL_VMW_REF_SURFACE \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ union drm_vmw_surface_reference_arg) #define DRM_IOCTL_VMW_EXECBUF \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ struct drm_vmw_execbuf_arg) #define DRM_IOCTL_VMW_GET_3D_CAP \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ struct drm_vmw_get_3d_cap_arg) #define DRM_IOCTL_VMW_FENCE_WAIT \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ struct drm_vmw_fence_wait_arg) #define DRM_IOCTL_VMW_FENCE_SIGNALED \ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ struct drm_vmw_fence_signaled_arg) #define DRM_IOCTL_VMW_FENCE_UNREF \ DRM_IOW(DRM_COMMAND_BASE + 
DRM_VMW_FENCE_UNREF, \ struct drm_vmw_fence_arg) #define DRM_IOCTL_VMW_FENCE_EVENT \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ struct drm_vmw_fence_event_arg) #define DRM_IOCTL_VMW_PRESENT \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ struct drm_vmw_present_arg) #define DRM_IOCTL_VMW_PRESENT_READBACK \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ struct drm_vmw_present_readback_arg) #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ struct drm_vmw_update_layout_arg) /** * The core DRM version of this macro doesn't account for * DRM_COMMAND_BASE. */ #define VMW_IOCTL_DEF(ioctl, func, flags) \ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl} /** * Ioctl definitions. */ static struct drm_ioctl_desc vmw_ioctls[] = { VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, vmw_kms_cursor_bypass_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH | DRM_UNLOCKED), 
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, vmw_fence_obj_signaled_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, DRM_AUTH | DRM_UNLOCKED), /* these allow direct access to the framebuffers mark as master only */ VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_PRESENT_READBACK, vmw_present_readback_ioctl, DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, DRM_MASTER | DRM_UNLOCKED), }; static struct pci_device_id vmw_pci_id_list[] = { {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static void vmw_master_init(struct vmw_master *); static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr); MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); module_param_named(enable_fbdev, enable_fbdev, int, 0600); static void vmw_print_capabilities(uint32_t capabilities) { DRM_INFO("Capabilities:\n"); if (capabilities & SVGA_CAP_RECT_COPY) DRM_INFO(" Rect copy.\n"); if (capabilities & SVGA_CAP_CURSOR) DRM_INFO(" Cursor.\n"); if (capabilities & SVGA_CAP_CURSOR_BYPASS) DRM_INFO(" Cursor bypass.\n"); if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) DRM_INFO(" Cursor bypass 2.\n"); if (capabilities & SVGA_CAP_8BIT_EMULATION) DRM_INFO(" 8bit emulation.\n"); if (capabilities & SVGA_CAP_ALPHA_CURSOR) DRM_INFO(" Alpha cursor.\n"); if (capabilities & SVGA_CAP_3D) DRM_INFO(" 3D.\n"); if (capabilities & SVGA_CAP_EXTENDED_FIFO) DRM_INFO(" Extended Fifo.\n"); if (capabilities & 
SVGA_CAP_MULTIMON) DRM_INFO(" Multimon.\n"); if (capabilities & SVGA_CAP_PITCHLOCK) DRM_INFO(" Pitchlock.\n"); if (capabilities & SVGA_CAP_IRQMASK) DRM_INFO(" Irq mask.\n"); if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) DRM_INFO(" Display Topology.\n"); if (capabilities & SVGA_CAP_GMR) DRM_INFO(" GMR.\n"); if (capabilities & SVGA_CAP_TRACES) DRM_INFO(" Traces.\n"); if (capabilities & SVGA_CAP_GMR2) DRM_INFO(" GMR2.\n"); if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) DRM_INFO(" Screen Object 2.\n"); } /** * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at * the start of a buffer object. * * @dev_priv: The device private structure. * * This function will idle the buffer using an uninterruptible wait, then * map the first page and initialize a pending occlusion query result structure, * Finally it will unmap the buffer. * * TODO: Since we're only mapping a single page, we should optimize the map * to use kmap_atomic / iomap_atomic. */ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) { struct ttm_bo_kmap_obj map; volatile SVGA3dQueryResult *result; bool dummy; int ret; struct ttm_bo_device *bdev = &dev_priv->bdev; struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; ttm_bo_reserve(bo, false, false, false, 0); spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, false); spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ); ret = ttm_bo_kmap(bo, 0, 1, &map); if (likely(ret == 0)) { result = ttm_kmap_obj_virtual(&map, &dummy); result->totalSize = sizeof(*result); result->state = SVGA3D_QUERYSTATE_PENDING; result->result32 = 0xff; ttm_bo_kunmap(&map); } else DRM_ERROR("Dummy query buffer map failed.\n"); ttm_bo_unreserve(bo); } /** * vmw_dummy_query_bo_create - create a bo to hold a dummy query result * * @dev_priv: A device private structure. 
* * This function creates a small buffer object that holds the query * result for dummy queries emitted as query barriers. * No interruptible waits are done within this function. * * Returns an error if bo creation fails. */ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) { return ttm_bo_create(&dev_priv->bdev, PAGE_SIZE, ttm_bo_type_device, &vmw_vram_sys_placement, 0, false, NULL, &dev_priv->dummy_query_bo); } static int vmw_request_device(struct vmw_private *dev_priv) { int ret; ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); if (unlikely(ret != 0)) { DRM_ERROR("Unable to initialize FIFO.\n"); return ret; } vmw_fence_fifo_up(dev_priv->fman); ret = vmw_dummy_query_bo_create(dev_priv); if (unlikely(ret != 0)) goto out_no_query_bo; vmw_dummy_query_bo_prepare(dev_priv); return 0; out_no_query_bo: vmw_fence_fifo_down(dev_priv->fman); vmw_fifo_release(dev_priv, &dev_priv->fifo); return ret; } static void vmw_release_device(struct vmw_private *dev_priv) { /* * Previous destructions should've released * the pinned bo. */ BUG_ON(dev_priv->pinned_bo != NULL); ttm_bo_unref(&dev_priv->dummy_query_bo); vmw_fence_fifo_down(dev_priv->fman); vmw_fifo_release(dev_priv, &dev_priv->fifo); } /** * Increase the 3d resource refcount. * If the count was prevously zero, initialize the fifo, switching to svga * mode. Note that the master holds a ref as well, and may request an * explicit switch to svga mode if fb is not running, using @unhide_svga. 
*/ int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga) { int ret = 0; mutex_lock(&dev_priv->release_mutex); if (unlikely(dev_priv->num_3d_resources++ == 0)) { ret = vmw_request_device(dev_priv); if (unlikely(ret != 0)) --dev_priv->num_3d_resources; } else if (unhide_svga) { mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) & ~SVGA_REG_ENABLE_HIDE); mutex_unlock(&dev_priv->hw_mutex); } mutex_unlock(&dev_priv->release_mutex); return ret; } /** * Decrease the 3d resource refcount. * If the count reaches zero, disable the fifo, switching to vga mode. * Note that the master holds a refcount as well, and may request an * explicit switch to vga mode when it releases its refcount to account * for the situation of an X server vt switch to VGA with 3d resources * active. */ void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga) { int32_t n3d; mutex_lock(&dev_priv->release_mutex); if (unlikely(--dev_priv->num_3d_resources == 0)) vmw_release_device(dev_priv); else if (hide_svga) { mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ENABLE, vmw_read(dev_priv, SVGA_REG_ENABLE) | SVGA_REG_ENABLE_HIDE); mutex_unlock(&dev_priv->hw_mutex); } n3d = (int32_t) dev_priv->num_3d_resources; mutex_unlock(&dev_priv->release_mutex); BUG_ON(n3d < 0); } /** * Sets the initial_[width|height] fields on the given vmw_private. * * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then * clamping the value to fb_max_[width|height] fields and the * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. * If the values appear to be invalid, set them to * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 
*/ static void vmw_get_initial_size(struct vmw_private *dev_priv) { uint32_t width; uint32_t height; width = vmw_read(dev_priv, SVGA_REG_WIDTH); height = vmw_read(dev_priv, SVGA_REG_HEIGHT); width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH); height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT); if (width > dev_priv->fb_max_width || height > dev_priv->fb_max_height) { /* * This is a host error and shouldn't occur. */ width = VMW_MIN_INITIAL_WIDTH; height = VMW_MIN_INITIAL_HEIGHT; } dev_priv->initial_width = width; dev_priv->initial_height = height; } static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) { struct vmw_private *dev_priv; int ret; uint32_t svga_id; enum vmw_res_type i; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (unlikely(dev_priv == NULL)) { DRM_ERROR("Failed allocating a device private struct.\n"); return -ENOMEM; } pci_set_master(dev->pdev); dev_priv->dev = dev; dev_priv->vmw_chipset = chipset; dev_priv->last_read_seqno = (uint32_t) -100; mutex_init(&dev_priv->hw_mutex); mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); rwlock_init(&dev_priv->resource_lock); for (i = vmw_res_context; i < vmw_res_max; ++i) { idr_init(&dev_priv->res_idr[i]); INIT_LIST_HEAD(&dev_priv->res_lru[i]); } mutex_init(&dev_priv->init_mutex); init_waitqueue_head(&dev_priv->fence_queue); init_waitqueue_head(&dev_priv->fifo_queue); dev_priv->fence_queue_waiters = 0; atomic_set(&dev_priv->fifo_queue_waiters, 0); dev_priv->used_memory_size = 0; dev_priv->io_start = pci_resource_start(dev->pdev, 0); dev_priv->vram_start = pci_resource_start(dev->pdev, 1); dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); dev_priv->enable_fb = enable_fbdev; mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); svga_id = vmw_read(dev_priv, SVGA_REG_ID); if (svga_id != SVGA_ID_2) { ret = -ENOSYS; DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); mutex_unlock(&dev_priv->hw_mutex); goto out_err0; } 
dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); vmw_get_initial_size(dev_priv); if (dev_priv->capabilities & SVGA_CAP_GMR) { dev_priv->max_gmr_descriptors = vmw_read(dev_priv, SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); dev_priv->max_gmr_ids = vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); } if (dev_priv->capabilities & SVGA_CAP_GMR2) { dev_priv->max_gmr_pages = vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); dev_priv->memory_size = vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); dev_priv->memory_size -= dev_priv->vram_size; } else { /* * An arbitrary limit of 512MiB on surface * memory. But all HWV8 hardware supports GMR2. */ dev_priv->memory_size = 512*1024*1024; } mutex_unlock(&dev_priv->hw_mutex); vmw_print_capabilities(dev_priv->capabilities); if (dev_priv->capabilities & SVGA_CAP_GMR) { DRM_INFO("Max GMR ids is %u\n", (unsigned)dev_priv->max_gmr_ids); DRM_INFO("Max GMR descriptors is %u\n", (unsigned)dev_priv->max_gmr_descriptors); } if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max number of GMR pages is %u\n", (unsigned)dev_priv->max_gmr_pages); DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", (unsigned)dev_priv->memory_size / 1024); } DRM_INFO("VRAM at 0x%08x size is %u kiB\n", dev_priv->vram_start, dev_priv->vram_size / 1024); DRM_INFO("MMIO at 0x%08x size is %u kiB\n", dev_priv->mmio_start, dev_priv->mmio_size / 1024); ret = vmw_ttm_global_init(dev_priv); if (unlikely(ret != 0)) goto out_err0; vmw_master_init(&dev_priv->fbdev_master); ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); dev_priv->active_master = &dev_priv->fbdev_master; ret = ttm_bo_device_init(&dev_priv->bdev, dev_priv->bo_global_ref.ref.object, &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET, false); if 
(unlikely(ret != 0)) { DRM_ERROR("Failed initializing TTM buffer object driver.\n"); goto out_err1; } ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, (dev_priv->vram_size >> PAGE_SHIFT)); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing memory manager for VRAM.\n"); goto out_err2; } dev_priv->has_gmr = true; if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, dev_priv->max_gmr_ids) != 0) { DRM_INFO("No GMR memory available. " "Graphics memory resources are very limited.\n"); dev_priv->has_gmr = false; } dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start, dev_priv->mmio_size); if (unlikely(dev_priv->mmio_virt == NULL)) { ret = -ENOMEM; DRM_ERROR("Failed mapping MMIO.\n"); goto out_err3; } /* Need mmio memory to check for fifo pitchlock cap. */ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && !vmw_fifo_have_pitchlock(dev_priv)) { ret = -ENOSYS; DRM_ERROR("Hardware has no pitchlock\n"); goto out_err4; } dev_priv->tdev = ttm_object_device_init (dev_priv->mem_global_ref.object, 12); if (unlikely(dev_priv->tdev == NULL)) { DRM_ERROR("Unable to initialize TTM object management.\n"); ret = -ENOMEM; goto out_err4; } dev->dev_private = dev_priv; ret = pci_request_regions(dev->pdev, "vmwgfx probe"); dev_priv->stealth = (ret != 0); if (dev_priv->stealth) { /** * Request at least the mmio PCI resource. */ DRM_INFO("It appears like vesafb is loaded. 
" "Ignore above error if any.\n"); ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); if (unlikely(ret != 0)) { DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); goto out_no_device; } } if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { ret = drm_irq_install(dev); if (ret != 0) { DRM_ERROR("Failed installing irq: %d\n", ret); goto out_no_irq; } } dev_priv->fman = vmw_fence_manager_init(dev_priv); if (unlikely(dev_priv->fman == NULL)) goto out_no_fman; vmw_kms_save_vga(dev_priv); /* Start kms and overlay systems, needs fifo. */ ret = vmw_kms_init(dev_priv); if (unlikely(ret != 0)) goto out_no_kms; vmw_overlay_init(dev_priv); if (dev_priv->enable_fb) { ret = vmw_3d_resource_inc(dev_priv, true); if (unlikely(ret != 0)) goto out_no_fifo; vmw_fb_init(dev_priv); } dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; register_pm_notifier(&dev_priv->pm_nb); return 0; out_no_fifo: vmw_overlay_close(dev_priv); vmw_kms_close(dev_priv); out_no_kms: vmw_kms_restore_vga(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); out_no_irq: if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else pci_release_regions(dev->pdev); out_no_device: ttm_object_device_release(&dev_priv->tdev); out_err4: iounmap(dev_priv->mmio_virt); out_err3: drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); if (dev_priv->has_gmr) (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); out_err2: (void)ttm_bo_device_release(&dev_priv->bdev); out_err1: vmw_ttm_global_release(dev_priv); out_err0: for (i = vmw_res_context; i < vmw_res_max; ++i) idr_destroy(&dev_priv->res_idr[i]); kfree(dev_priv); return ret; } static int vmw_driver_unload(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); enum vmw_res_type i; unregister_pm_notifier(&dev_priv->pm_nb); if (dev_priv->ctx.res_ht_initialized) 
drm_ht_remove(&dev_priv->ctx.res_ht); if (dev_priv->ctx.cmd_bounce) vfree(dev_priv->ctx.cmd_bounce); if (dev_priv->enable_fb) { vmw_fb_close(dev_priv); vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, false); } vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else pci_release_regions(dev->pdev); ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, dev_priv->mmio_size, DRM_MTRR_WC); if (dev_priv->has_gmr) (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); for (i = vmw_res_context; i < vmw_res_max; ++i) idr_destroy(&dev_priv->res_idr[i]); kfree(dev_priv); return 0; } static void vmw_preclose(struct drm_device *dev, struct drm_file *file_priv) { struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); struct vmw_private *dev_priv = vmw_priv(dev); vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events); } static void vmw_postclose(struct drm_device *dev, struct drm_file *file_priv) { struct vmw_fpriv *vmw_fp; vmw_fp = vmw_fpriv(file_priv); ttm_object_file_release(&vmw_fp->tfile); if (vmw_fp->locked_master) drm_master_put(&vmw_fp->locked_master); kfree(vmw_fp); } static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_fpriv *vmw_fp; int ret = -ENOMEM; vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); if (unlikely(vmw_fp == NULL)) return ret; INIT_LIST_HEAD(&vmw_fp->fence_events); vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); if (unlikely(vmw_fp->tfile == NULL)) goto out_no_tfile; file_priv->driver_priv = vmw_fp; dev_priv->bdev.dev_mapping = dev->dev_mapping; return 0; 
out_no_tfile: kfree(vmw_fp); return ret; } static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct drm_file *file_priv = filp->private_data; struct drm_device *dev = file_priv->minor->dev; unsigned int nr = DRM_IOCTL_NR(cmd); /* * Do extra checking on driver private ioctls. */ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { struct drm_ioctl_desc *ioctl = &vmw_ioctls[nr - DRM_COMMAND_BASE]; if (unlikely(ioctl->cmd_drv != cmd)) { DRM_ERROR("Invalid command format, ioctl %d\n", nr - DRM_COMMAND_BASE); return -EINVAL; } } return drm_ioctl(filp, cmd, arg); } static int vmw_firstopen(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); dev_priv->is_opened = true; return 0; } static void vmw_lastclose(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_crtc *crtc; struct drm_mode_set set; int ret; /** * Do nothing on the lastclose call from drm_unload. 
*/ if (!dev_priv->is_opened) return; dev_priv->is_opened = false; set.x = 0; set.y = 0; set.fb = NULL; set.mode = NULL; set.connectors = NULL; set.num_connectors = 0; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { set.crtc = crtc; ret = drm_mode_set_config_internal(&set); WARN_ON(ret != 0); } } static void vmw_master_init(struct vmw_master *vmaster) { ttm_lock_init(&vmaster->lock); INIT_LIST_HEAD(&vmaster->fb_surf); mutex_init(&vmaster->fb_surf_mutex); } static int vmw_master_create(struct drm_device *dev, struct drm_master *master) { struct vmw_master *vmaster; vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); if (unlikely(vmaster == NULL)) return -ENOMEM; vmw_master_init(vmaster); ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); master->driver_priv = vmaster; return 0; } static void vmw_master_destroy(struct drm_device *dev, struct drm_master *master) { struct vmw_master *vmaster = vmw_master(master); master->driver_priv = NULL; kfree(vmaster); } static int vmw_master_set(struct drm_device *dev, struct drm_file *file_priv, bool from_open) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); struct vmw_master *active = dev_priv->active_master; struct vmw_master *vmaster = vmw_master(file_priv->master); int ret = 0; if (!dev_priv->enable_fb) { ret = vmw_3d_resource_inc(dev_priv, true); if (unlikely(ret != 0)) return ret; vmw_kms_save_vga(dev_priv); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 0); mutex_unlock(&dev_priv->hw_mutex); } if (active) { BUG_ON(active != &dev_priv->fbdev_master); ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); if (unlikely(ret != 0)) goto out_no_active_lock; ttm_lock_set_kill(&active->lock, true, SIGTERM); ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); if (unlikely(ret != 0)) { DRM_ERROR("Unable to clean VRAM on " "master drop.\n"); } dev_priv->active_master = NULL; } ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); if (!from_open) { 
ttm_vt_unlock(&vmaster->lock); BUG_ON(vmw_fp->locked_master != file_priv->master); drm_master_put(&vmw_fp->locked_master); } dev_priv->active_master = vmaster; return 0; out_no_active_lock: if (!dev_priv->enable_fb) { vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); mutex_unlock(&dev_priv->hw_mutex); } return ret; } static void vmw_master_drop(struct drm_device *dev, struct drm_file *file_priv, bool from_release) { struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); struct vmw_master *vmaster = vmw_master(file_priv->master); int ret; /** * Make sure the master doesn't disappear while we have * it locked. */ vmw_fp->locked_master = drm_master_get(file_priv->master); ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); vmw_execbuf_release_pinned_bo(dev_priv); if (unlikely((ret != 0))) { DRM_ERROR("Unable to lock TTM at VT switch.\n"); drm_master_put(&vmw_fp->locked_master); } ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); if (!dev_priv->enable_fb) { ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); if (unlikely(ret != 0)) DRM_ERROR("Unable to clean VRAM on master drop.\n"); vmw_kms_restore_vga(dev_priv); vmw_3d_resource_dec(dev_priv, true); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_TRACES, 1); mutex_unlock(&dev_priv->hw_mutex); } dev_priv->active_master = &dev_priv->fbdev_master; ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); ttm_vt_unlock(&dev_priv->fbdev_master.lock); if (dev_priv->enable_fb) vmw_fb_on(dev_priv); } static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); drm_put_dev(dev); } static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr) { struct vmw_private *dev_priv = container_of(nb, struct vmw_private, pm_nb); struct vmw_master *vmaster = dev_priv->active_master; switch (val) { case 
PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: ttm_suspend_lock(&vmaster->lock); /** * This empties VRAM and unbinds all GMR bindings. * Buffer contents is moved to swappable memory. */ vmw_execbuf_release_pinned_bo(dev_priv); vmw_resource_evict_all(dev_priv); ttm_bo_swapout_all(&dev_priv->bdev); break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: case PM_POST_RESTORE: ttm_suspend_unlock(&vmaster->lock); break; case PM_RESTORE_PREPARE: break; default: break; } return 0; } /** * These might not be needed with the virtual SVGA device. */ static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); if (dev_priv->num_3d_resources != 0) { DRM_INFO("Can't suspend or hibernate " "while 3D resources are active.\n"); return -EBUSY; } pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int vmw_pci_resume(struct pci_dev *pdev) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return pci_enable_device(pdev); } static int vmw_pm_suspend(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); struct pm_message dummy; dummy.event = 0; return vmw_pci_suspend(pdev, dummy); } static int vmw_pm_resume(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); return vmw_pci_resume(pdev); } static int vmw_pm_prepare(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); /** * Release 3d reference held by fbdev and potentially * stop fifo. 
*/ dev_priv->suspended = true; if (dev_priv->enable_fb) vmw_3d_resource_dec(dev_priv, true); if (dev_priv->num_3d_resources != 0) { DRM_INFO("Can't suspend or hibernate " "while 3D resources are active.\n"); if (dev_priv->enable_fb) vmw_3d_resource_inc(dev_priv, true); dev_priv->suspended = false; return -EBUSY; } return 0; } static void vmw_pm_complete(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct vmw_private *dev_priv = vmw_priv(dev); mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); (void) vmw_read(dev_priv, SVGA_REG_ID); mutex_unlock(&dev_priv->hw_mutex); /** * Reclaim 3d reference held by fbdev and potentially * start fifo. */ if (dev_priv->enable_fb) vmw_3d_resource_inc(dev_priv, false); dev_priv->suspended = false; } static const struct dev_pm_ops vmw_pm_ops = { .prepare = vmw_pm_prepare, .complete = vmw_pm_complete, .suspend = vmw_pm_suspend, .resume = vmw_pm_resume, }; static const struct file_operations vmwgfx_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = vmw_unlocked_ioctl, .mmap = vmw_mmap, .poll = vmw_fops_poll, .read = vmw_fops_read, .fasync = drm_fasync, #if defined(CONFIG_COMPAT) .compat_ioctl = drm_compat_ioctl, #endif .llseek = noop_llseek, }; static struct drm_driver driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET, .load = vmw_driver_load, .unload = vmw_driver_unload, .firstopen = vmw_firstopen, .lastclose = vmw_lastclose, .irq_preinstall = vmw_irq_preinstall, .irq_postinstall = vmw_irq_postinstall, .irq_uninstall = vmw_irq_uninstall, .irq_handler = vmw_irq_handler, .get_vblank_counter = vmw_get_vblank_counter, .enable_vblank = vmw_enable_vblank, .disable_vblank = vmw_disable_vblank, .ioctls = vmw_ioctls, .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls), .dma_quiescent = NULL, /*vmw_dma_quiescent, */ .master_create = vmw_master_create, .master_destroy = 
vmw_master_destroy, .master_set = vmw_master_set, .master_drop = vmw_master_drop, .open = vmw_driver_open, .preclose = vmw_preclose, .postclose = vmw_postclose, .dumb_create = vmw_dumb_create, .dumb_map_offset = vmw_dumb_map_offset, .dumb_destroy = vmw_dumb_destroy, .fops = &vmwgfx_driver_fops, .name = VMWGFX_DRIVER_NAME, .desc = VMWGFX_DRIVER_DESC, .date = VMWGFX_DRIVER_DATE, .major = VMWGFX_DRIVER_MAJOR, .minor = VMWGFX_DRIVER_MINOR, .patchlevel = VMWGFX_DRIVER_PATCHLEVEL }; static struct pci_driver vmw_pci_driver = { .name = VMWGFX_DRIVER_NAME, .id_table = vmw_pci_id_list, .probe = vmw_probe, .remove = vmw_remove, .driver = { .pm = &vmw_pm_ops } }; static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { return drm_get_pci_dev(pdev, ent, &driver); } static int __init vmwgfx_init(void) { int ret; ret = drm_pci_init(&driver, &vmw_pci_driver); if (ret) DRM_ERROR("Failed initializing DRM.\n"); return ret; } static void __exit vmwgfx_exit(void) { drm_pci_exit(&driver, &vmw_pci_driver); } module_init(vmwgfx_init); module_exit(vmwgfx_exit); MODULE_AUTHOR("VMware Inc. and others"); MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); MODULE_LICENSE("GPL and additional rights"); MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." __stringify(VMWGFX_DRIVER_MINOR) "." __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." "0");
gpl-2.0
Talustus/dreamkernel_ics_sghi777
net/ipv6/raw.c
1117
31805
/* * RAW sockets for IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Adapted from linux/net/ipv4/raw.c * * Fixes: * Hideaki YOSHIFUJI : sin6_scope_id support * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance) * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/slab.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/icmpv6.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <linux/skbuff.h> #include <linux/compat.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/ip6_route.h> #include <net/ip6_checksum.h> #include <net/addrconf.h> #include <net/transp_v6.h> #include <net/udp.h> #include <net/inet_common.h> #include <net/tcp_states.h> #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) #include <net/mip6.h> #endif #include <linux/mroute6.h> #include <net/raw.h> #include <net/rawv6.h> #include <net/xfrm.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> static struct raw_hashinfo raw_v6_hashinfo = { .lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock), }; static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, unsigned short num, const struct in6_addr *loc_addr, const struct in6_addr *rmt_addr, int dif) { struct hlist_node *node; int is_multicast = ipv6_addr_is_multicast(loc_addr); sk_for_each_from(sk, node) if (inet_sk(sk)->inet_num == 
num) { struct ipv6_pinfo *np = inet6_sk(sk); if (!net_eq(sock_net(sk), net)) continue; if (!ipv6_addr_any(&np->daddr) && !ipv6_addr_equal(&np->daddr, rmt_addr)) continue; if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) continue; if (!ipv6_addr_any(&np->rcv_saddr)) { if (ipv6_addr_equal(&np->rcv_saddr, loc_addr)) goto found; if (is_multicast && inet6_mc_check(sk, loc_addr, rmt_addr)) goto found; continue; } goto found; } sk = NULL; found: return sk; } /* * 0 - deliver * 1 - block */ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) { struct icmp6hdr *icmph; struct raw6_sock *rp = raw6_sk(sk); if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) { __u32 *data = &rp->filter.data[0]; int bit_nr; icmph = (struct icmp6hdr *) skb->data; bit_nr = icmph->icmp6_type; return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0; } return 0; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb); static mh_filter_t __rcu *mh_filter __read_mostly; int rawv6_mh_filter_register(mh_filter_t filter) { rcu_assign_pointer(mh_filter, filter); return 0; } EXPORT_SYMBOL(rawv6_mh_filter_register); int rawv6_mh_filter_unregister(mh_filter_t filter) { rcu_assign_pointer(mh_filter, NULL); synchronize_rcu(); return 0; } EXPORT_SYMBOL(rawv6_mh_filter_unregister); #endif /* * demultiplex raw sockets. * (should consider queueing the skb in the sock receive_queue * without calling rawv6.c) * * Caller owns SKB so we must make clones. 
 */
/*
 * Deliver one clone of @skb to every matching raw socket bound to
 * protocol @nexthdr.  Returns 1 as soon as at least one matching
 * socket was found (even if a later per-socket filter or clone
 * allocation fails), 0 otherwise.  Runs with raw_v6_hashinfo.lock
 * held for reading; the shared skb is only read here, clones are
 * handed to rawv6_rcv().
 */
static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	const struct in6_addr *saddr;
	const struct in6_addr *daddr;
	struct sock *sk;
	int delivered = 0;
	__u8 hash;
	struct net *net;

	saddr = &ipv6_hdr(skb)->saddr;
	/* daddr immediately follows saddr in struct ipv6hdr. */
	daddr = saddr + 1;

	hash = nexthdr & (MAX_INET_PROTOS - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);

	if (sk == NULL)
		goto out;

	net = dev_net(skb->dev);
	sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);

	while (sk) {
		int filtered;

		delivered = 1;
		/* Per-protocol receive filters: <0 stops the whole walk,
		 * >0 skips just this socket, 0 delivers.
		 */
		switch (nexthdr) {
		case IPPROTO_ICMPV6:
			filtered = icmpv6_filter(sk, skb);
			break;

#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		case IPPROTO_MH:
		{
			/* XXX: To validate MH only once for each packet,
			 * this is placed here. It should be after checking
			 * xfrm policy, however it doesn't. The checking xfrm
			 * policy is placed in rawv6_rcv() because it is
			 * required for each socket.
			 */
			mh_filter_t *filter;

			filter = rcu_dereference(mh_filter);
			filtered = filter ? (*filter)(sk, skb) : 0;
			break;
		}
#endif
		default:
			filtered = 0;
			break;
		}

		if (filtered < 0)
			break;
		if (filtered == 0) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone) {
				nf_reset(clone);
				rawv6_rcv(sk, clone);
			}
		}
		sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
				     IP6CB(skb)->iif);
	}
out:
	read_unlock(&raw_v6_hashinfo.lock);
	return delivered;
}

/*
 * Entry point from the IPv6 input path: returns non-zero when a raw
 * socket chain exists for @nexthdr and the packet was delivered to at
 * least one socket.  The unlocked sk_head() peek is only a fast-path
 * check; the authoritative walk happens under the lock inside
 * ipv6_raw_deliver().
 */
int raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
	struct sock *raw_sk;

	raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]);
	if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
		raw_sk = NULL;

	return raw_sk != NULL;
}

/* This cleans up af_inet6 a bit.
   -DaveM */
/*
 * bind() for IPv6 raw sockets: validate the requested local address,
 * resolve the scope interface for link-local binds, and record the
 * address in the socket.  Returns 0 or a negative errno.
 */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__be32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;
	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE)
		goto out;

	rcu_read_lock();
	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (addr_type & IPV6_ADDR_LINKLOCAL) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out_unlock;	/* err is still -EINVAL */

			err = -ENODEV;
			dev = dev_get_by_index_rcu(sock_net(sk),
						   sk->sk_bound_dev_if);
			if (!dev)
				goto out_unlock;
		}

		/* ipv4 addr of the socket is invalid.  Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
					   dev, 0)) {
				goto out_unlock;
			}
		}
	}

	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
	ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr);
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
	err = 0;
out_unlock:
	rcu_read_unlock();
out:
	release_sock(sk);
	return err;
}

/*
 * Propagate an ICMPv6 error to one raw socket: queue it on the error
 * queue when IPV6_RECVERR is set, and raise sk_err for hard errors.
 */
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       u8 type, u8 code, int offset, __be32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and error is hard.
	 */
	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG)
		/* PMTU errors are only "hard" when PMTU discovery is forced. */
		harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);

	if (np->recverr) {
		u8 *payload = skb->data;
		/* With IPV6_HDRINCL the user sees the full packet, so the
		 * payload offset only applies to non-hdrincl sockets.
		 */
		if (!inet->hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (np->recverr || harderr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	}
}

/*
 * Fan an ICMPv6 error out to every raw socket bound to @nexthdr that
 * matches the addresses of the offending (inner) packet at skb->data.
 */
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
		u8 type, u8 code, int inner_offset, __be32 info)
{
	struct sock *sk;
	int hash;
	const struct in6_addr *saddr, *daddr;
	struct net *net;

	hash = nexthdr & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);
	if (sk != NULL) {
		/* Note: ipv6_hdr(skb) != skb->data */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
		net = dev_net(skb->dev);

		while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
						IP6CB(skb)->iif))) {
			rawv6_err(sk, skb, NULL, type, code,
					inner_offset, info);
			sk = sk_next(sk);
		}
	}
	read_unlock(&raw_v6_hashinfo.lock);
}

/*
 * Final per-socket receive step: verify the checksum when required
 * (rp->checksum set or a socket filter attached) and queue the skb on
 * the socket, dropping it on failure.
 */
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
	if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
	    skb_checksum_complete(skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Charge it to the socket. */
	if (ip_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	return 0;
}

/*
 *	This is next to useless...
 *	if we demultiplex in network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 */
/*
 * Receive one (already cloned) skb on a raw socket: run xfrm policy,
 * normalize the checksum state, and hand the skb to rawv6_rcv_skb().
 * Always returns 0; drops are accounted via sk_drops.
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Without IPV6_CHECKSUM the socket never verifies checksums. */
	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		/* Drop the already-pulled network header from the
		 * hardware checksum before validating it.
		 */
		skb_postpull_rcsum(skb, skb_network_header(skb),
				   skb_network_header_len(skb));
		if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     skb->len, inet->inet_num, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		/* Seed skb->csum with the pseudo-header so a later
		 * skb_checksum_complete() can finish the verification.
		 */
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len,
							 inet->inet_num, 0));

	if (inet->hdrincl) {
		/* hdrincl sockets get the raw packet, but only if the
		 * checksum proves out.
		 */
		if (skb_checksum_complete(skb)) {
			atomic_inc(&sk->sk_drops);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}

/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
*/ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name; struct sk_buff *skb; size_t copied; int err; if (flags & MSG_OOB) return -EOPNOTSUPP; if (addr_len) *addr_len=sizeof(*sin6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } if (skb_csum_unnecessary(skb)) { err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else if (msg->msg_flags&MSG_TRUNC) { if (__skb_checksum_complete(skb)) goto csum_copy_err; err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else { err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (err) goto out_free; /* Copy the address. */ if (sin6) { sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = 0; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6->sin6_scope_id = IP6CB(skb)->iif; } sock_recv_ts_and_drops(msg, sk, skb); if (np->rxopt.all) datagram_recv_ctl(sk, msg, skb); err = copied; if (flags & MSG_TRUNC) err = skb->len; out_free: skb_free_datagram(sk, skb); out: return err; csum_copy_err: skb_kill_datagram(sk, skb, flags); /* Error for blocking case is chosen to masquerade as some normal condition. */ err = (flags&MSG_DONTWAIT) ? 
-EAGAIN : -EHOSTUNREACH; goto out; } static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct raw6_sock *rp) { struct sk_buff *skb; int err = 0; int offset; int len; int total_len; __wsum tmp_csum; __sum16 csum; if (!rp->checksum) goto send; if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; offset = rp->offset; total_len = inet_sk(sk)->cork.base.length - (skb_network_header(skb) - skb->data); if (offset >= total_len - 1) { err = -EINVAL; ip6_flush_pending_frames(sk); goto out; } /* should be check HW csum miyazawa */ if (skb_queue_len(&sk->sk_write_queue) == 1) { /* * Only one fragment on the socket. */ tmp_csum = skb->csum; } else { struct sk_buff *csum_skb = NULL; tmp_csum = 0; skb_queue_walk(&sk->sk_write_queue, skb) { tmp_csum = csum_add(tmp_csum, skb->csum); if (csum_skb) continue; len = skb->len - skb_transport_offset(skb); if (offset >= len) { offset -= len; continue; } csum_skb = skb; } skb = csum_skb; } offset += skb_transport_offset(skb); if (skb_copy_bits(skb, offset, &csum, 2)) BUG(); /* in case cksum was not initialized */ if (unlikely(csum)) tmp_csum = csum_sub(tmp_csum, csum_unfold(csum)); csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, total_len, fl6->flowi6_proto, tmp_csum); if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP) csum = CSUM_MANGLED_0; if (skb_store_bits(skb, offset, &csum, 2)) BUG(); send: err = ip6_push_pending_frames(sk); out: return err; } static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, struct flowi6 *fl6, struct dst_entry **dstp, unsigned int flags) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *iph; struct sk_buff *skb; int err; struct rt6_info *rt = (struct rt6_info *)*dstp; if (length > rt->dst.dev->mtu) { ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu); return -EMSGSIZE; } if (flags&MSG_PROBE) goto out; skb = sock_alloc_send_skb(sk, length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; skb_reserve(skb, 
LL_RESERVED_SPACE(rt->dst.dev)); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; skb_dst_set(skb, &rt->dst); *dstp = NULL; skb_put(skb, length); skb_reset_network_header(skb); iph = ipv6_hdr(skb); skb->ip_summed = CHECKSUM_NONE; skb->transport_header = skb->network_header; err = memcpy_fromiovecend((void *)iph, from, 0, length); if (err) goto error_fault; IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, rt->dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) goto error; out: return 0; error_fault: err = -EFAULT; kfree_skb(skb); error: IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); if (err == -ENOBUFS && !np->recverr) err = 0; return err; } static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg) { struct iovec *iov; u8 __user *type = NULL; u8 __user *code = NULL; u8 len = 0; int probed = 0; int i; if (!msg->msg_iov) return 0; for (i = 0; i < msg->msg_iovlen; i++) { iov = &msg->msg_iov[i]; if (!iov) continue; switch (fl6->flowi6_proto) { case IPPROTO_ICMPV6: /* check if one-byte field is readable or not. */ if (iov->iov_base && iov->iov_len < 1) break; if (!type) { type = iov->iov_base; /* check if code field is readable or not. */ if (iov->iov_len > 1) code = type + 1; } else if (!code) code = iov->iov_base; if (type && code) { if (get_user(fl6->fl6_icmp_type, type) || get_user(fl6->fl6_icmp_code, code)) return -EFAULT; probed = 1; } break; case IPPROTO_MH: if (iov->iov_base && iov->iov_len < 1) break; /* check if type field is readable or not. 
*/ if (iov->iov_len > 2 - len) { u8 __user *p = iov->iov_base; if (get_user(fl6->fl6_mh_type, &p[2 - len])) return -EFAULT; probed = 1; } else len += iov->iov_len; break; default: probed = 1; break; } if (probed) break; } return 0; } static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct ipv6_txoptions opt_space; struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name; struct in6_addr *daddr, *final_p, final; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct raw6_sock *rp = raw6_sk(sk); struct ipv6_txoptions *opt = NULL; struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; struct flowi6 fl6; int addr_len = msg->msg_namelen; int hlimit = -1; int tclass = -1; int dontfrag = -1; u16 proto; int err; /* Rough check on arithmetic overflow, better check is made in ip6_append_data(). */ if (len > INT_MAX) return -EMSGSIZE; /* Mirror BSD error message compatibility */ if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* * Get and verify the address. */ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_mark = sk->sk_mark; if (sin6) { if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (sin6->sin6_family && sin6->sin6_family != AF_INET6) return -EAFNOSUPPORT; /* port is the proto value [0..255] carried in nexthdr */ proto = ntohs(sin6->sin6_port); if (!proto) proto = inet->inet_num; else if (proto != inet->inet_num) return -EINVAL; if (proto > 255) return -EINVAL; daddr = &sin6->sin6_addr; if (np->sndflow) { fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; daddr = &flowlabel->dst; } } /* * Otherwise it will be difficult to maintain * sk->sk_dst_cache. 
*/ if (sk->sk_state == TCP_ESTABLISHED && ipv6_addr_equal(daddr, &np->daddr)) daddr = &np->daddr; if (addr_len >= sizeof(struct sockaddr_in6) && sin6->sin6_scope_id && ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL) fl6.flowi6_oif = sin6->sin6_scope_id; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; proto = inet->inet_num; daddr = &np->daddr; fl6.flowlabel = np->flow_label; } if (fl6.flowi6_oif == 0) fl6.flowi6_oif = sk->sk_bound_dev_if; if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; } if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; } if (!(opt->opt_nflen|opt->opt_flen)) opt = NULL; } if (opt == NULL) opt = np->opt; if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); fl6.flowi6_proto = proto; err = rawv6_probe_proto_opt(&fl6, msg); if (err) goto out; if (!ipv6_addr_any(daddr)) ipv6_addr_copy(&fl6.daddr, daddr); else fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) ipv6_addr_copy(&fl6.saddr, &np->saddr); final_p = fl6_update_dst(&fl6, opt, &final); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = np->mcast_oif; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; } if (hlimit < 0) { if (ipv6_addr_is_multicast(&fl6.daddr)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); } if (tclass < 0) tclass = np->tclass; if (dontfrag < 0) dontfrag = np->dontfrag; if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: if 
(inet->hdrincl) err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl6, &dst, msg->msg_flags); else { lock_sock(sk); err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info*)dst, msg->msg_flags, dontfrag); if (err) ip6_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) err = rawv6_push_pending_frames(sk, &fl6, rp); release_sock(sk); } done: dst_release(dst); out: fl6_sock_release(flowlabel); return err<0?err:len; do_confirm: dst_confirm(dst); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto done; } static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, char __user *optval, int optlen) { switch (optname) { case ICMPV6_FILTER: if (optlen > sizeof(struct icmp6_filter)) optlen = sizeof(struct icmp6_filter); if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; default: return -ENOPROTOOPT; } return 0; } static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int len; switch (optname) { case ICMPV6_FILTER: if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; if (len > sizeof(struct icmp6_filter)) len = sizeof(struct icmp6_filter); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) return -EFAULT; return 0; default: return -ENOPROTOOPT; } return 0; } static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct raw6_sock *rp = raw6_sk(sk); int val; if (get_user(val, (int __user *)optval)) return -EFAULT; switch (optname) { case IPV6_CHECKSUM: if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 && level == IPPROTO_IPV6) { /* * RFC3542 tells that IPV6_CHECKSUM socket * option in the IPPROTO_IPV6 level is not * allowed on ICMPv6 sockets. * If you want to set it, use IPPROTO_RAW * level IPV6_CHECKSUM socket option * (Linux extension). 
*/ return -EINVAL; } /* You may get strange result with a positive odd offset; RFC2292bis agrees with me. */ if (val > 0 && (val&1)) return -EINVAL; if (val < 0) { rp->checksum = 0; } else { rp->checksum = 1; rp->offset = val; } return 0; break; default: return -ENOPROTOOPT; } } static int rawv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { switch(level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_seticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return ipv6_setsockopt(sk, level, optname, optval, optlen); } return do_rawv6_setsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { switch (level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_seticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); } return do_rawv6_setsockopt(sk, level, optname, optval, optlen); } #endif static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct raw6_sock *rp = raw6_sk(sk); int val, len; if (get_user(len,optlen)) return -EFAULT; switch (optname) { case IPV6_CHECKSUM: /* * We allow getsockopt() for IPPROTO_IPV6-level * IPV6_CHECKSUM socket option on ICMPv6 sockets * since RFC3542 is silent about it. 
*/ if (rp->checksum == 0) val = -1; else val = rp->offset; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, sizeof(int), len); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval,&val,len)) return -EFAULT; return 0; } static int rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { switch(level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_geticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return ipv6_getsockopt(sk, level, optname, optval, optlen); } return do_rawv6_getsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { switch (level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_geticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); } return do_rawv6_getsockopt(sk, level, optname, optval, optlen); } #endif static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch(cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) amount = skb->tail - skb->transport_header; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } default: #ifdef CONFIG_IPV6_MROUTE return ip6mr_ioctl(sk, cmd, (void __user *)arg); #else return -ENOIOCTLCMD; #endif } } #ifdef CONFIG_COMPAT static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: case 
SIOCINQ: return -ENOIOCTLCMD; default: #ifdef CONFIG_IPV6_MROUTE return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg)); #else return -ENOIOCTLCMD; #endif } } #endif static void rawv6_close(struct sock *sk, long timeout) { if (inet_sk(sk)->inet_num == IPPROTO_RAW) ip6_ra_control(sk, -1); ip6mr_sk_done(sk); sk_common_release(sk); } static void raw6_destroy(struct sock *sk) { lock_sock(sk); ip6_flush_pending_frames(sk); release_sock(sk); inet6_destroy_sock(sk); } static int rawv6_init_sk(struct sock *sk) { struct raw6_sock *rp = raw6_sk(sk); switch (inet_sk(sk)->inet_num) { case IPPROTO_ICMPV6: rp->checksum = 1; rp->offset = 2; break; case IPPROTO_MH: rp->checksum = 1; rp->offset = 4; break; default: break; } return 0; } struct proto rawv6_prot = { .name = "RAWv6", .owner = THIS_MODULE, .close = rawv6_close, .destroy = raw6_destroy, .connect = ip6_datagram_connect, .disconnect = udp_disconnect, .ioctl = rawv6_ioctl, .init = rawv6_init_sk, .setsockopt = rawv6_setsockopt, .getsockopt = rawv6_getsockopt, .sendmsg = rawv6_sendmsg, .recvmsg = rawv6_recvmsg, .bind = rawv6_bind, .backlog_rcv = rawv6_rcv_skb, .hash = raw_hash_sk, .unhash = raw_unhash_sk, .obj_size = sizeof(struct raw6_sock), .h.raw_hash = &raw_v6_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_rawv6_setsockopt, .compat_getsockopt = compat_rawv6_getsockopt, .compat_ioctl = compat_rawv6_ioctl, #endif }; #ifdef CONFIG_PROC_FS static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) { struct ipv6_pinfo *np = inet6_sk(sp); const struct in6_addr *dest, *src; __u16 destp, srcp; dest = &np->daddr; src = &np->rcv_saddr; destp = 0; srcp = inet_sk(sp)->inet_num; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, sp->sk_state, sk_wmem_alloc_get(sp), 
sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } static int raw6_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, " sl " "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" " uid timeout inode ref pointer drops\n"); else raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); return 0; } static const struct seq_operations raw6_seq_ops = { .start = raw_seq_start, .next = raw_seq_next, .stop = raw_seq_stop, .show = raw6_seq_show, }; static int raw6_seq_open(struct inode *inode, struct file *file) { return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops); } static const struct file_operations raw6_seq_fops = { .owner = THIS_MODULE, .open = raw6_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static int __net_init raw6_init_net(struct net *net) { if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops)) return -ENOMEM; return 0; } static void __net_exit raw6_exit_net(struct net *net) { proc_net_remove(net, "raw6"); } static struct pernet_operations raw6_net_ops = { .init = raw6_init_net, .exit = raw6_exit_net, }; int __init raw6_proc_init(void) { return register_pernet_subsys(&raw6_net_ops); } void raw6_proc_exit(void) { unregister_pernet_subsys(&raw6_net_ops); } #endif /* CONFIG_PROC_FS */ /* Same as inet6_dgram_ops, sans udp_poll. 
 */
/* Socket-level operations for SOCK_RAW/PF_INET6 sockets. */
static const struct proto_ops inet6_sockraw_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_dgram_connect,	/* ok		*/
	.socketpair	   = sock_no_socketpair,	/* a do nothing	*/
	.accept		   = sock_no_accept,		/* a do nothing	*/
	.getname	   = inet6_getname,
	.poll		   = datagram_poll,		/* ok		*/
	.ioctl		   = inet6_ioctl,		/* must change  */
	.listen		   = sock_no_listen,		/* ok		*/
	.shutdown	   = inet_shutdown,		/* ok		*/
	.setsockopt	   = sock_common_setsockopt,	/* ok		*/
	.getsockopt	   = sock_common_getsockopt,	/* ok		*/
	.sendmsg	   = inet_sendmsg,		/* ok		*/
	.recvmsg	   = sock_common_recvmsg,	/* ok		*/
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

/* Registration record tying SOCK_RAW to rawv6_prot for any protocol. */
static struct inet_protosw rawv6_protosw = {
	.type		= SOCK_RAW,
	.protocol	= IPPROTO_IP,	/* wild card */
	.prot		= &rawv6_prot,
	.ops		= &inet6_sockraw_ops,
	.no_check	= UDP_CSUM_DEFAULT,
	.flags		= INET_PROTOSW_REUSE,
};

/*
 * Register the IPv6 raw-socket protocol switch.  Returns 0 or the
 * error from inet6_register_protosw().  (The goto/out label is a
 * leftover from when more setup steps followed the registration.)
 */
int __init rawv6_init(void)
{
	int ret;

	ret = inet6_register_protosw(&rawv6_protosw);
	if (ret)
		goto out;
out:
	return ret;
}

/* Undo rawv6_init(). */
void rawv6_exit(void)
{
	inet6_unregister_protosw(&rawv6_protosw);
}
gpl-2.0
czechop/kernel_milestone2
arch/mips/nxp/pnx833x/common/platform.c
1373
7981
/* * platform.c: platform support for PNX833X. * * Copyright 2008 NXP Semiconductors * Chris Steel <chris.steel@nxp.com> * Daniel Laird <daniel.j.laird@nxp.com> * * Based on software written by: * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/resource.h> #include <linux/serial.h> #include <linux/serial_pnx8xxx.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #ifdef CONFIG_I2C_PNX0105 /* Until i2c driver available in kernel.*/ #include <linux/i2c-pnx0105.h> #endif #include <irq.h> #include <irq-mapping.h> #include <pnx833x.h> static u64 uart_dmamask = DMA_BIT_MASK(32); static struct resource pnx833x_uart_resources[] = { [0] = { .start = PNX833X_UART0_PORTS_START, .end = PNX833X_UART0_PORTS_END, .flags = IORESOURCE_MEM, }, [1] = { .start = PNX833X_PIC_UART0_INT, .end = PNX833X_PIC_UART0_INT, .flags = IORESOURCE_IRQ, }, [2] = { .start = PNX833X_UART1_PORTS_START, .end = PNX833X_UART1_PORTS_END, .flags = IORESOURCE_MEM, }, [3] = { .start = PNX833X_PIC_UART1_INT, .end = PNX833X_PIC_UART1_INT, .flags = IORESOURCE_IRQ, }, }; struct pnx8xxx_port pnx8xxx_ports[] = { [0] = { .port = { .type = PORT_PNX8XXX, .iotype 
= UPIO_MEM, .membase = (void __iomem *)PNX833X_UART0_PORTS_START, .mapbase = PNX833X_UART0_PORTS_START, .irq = PNX833X_PIC_UART0_INT, .uartclk = 3692300, .fifosize = 16, .flags = UPF_BOOT_AUTOCONF, .line = 0, }, }, [1] = { .port = { .type = PORT_PNX8XXX, .iotype = UPIO_MEM, .membase = (void __iomem *)PNX833X_UART1_PORTS_START, .mapbase = PNX833X_UART1_PORTS_START, .irq = PNX833X_PIC_UART1_INT, .uartclk = 3692300, .fifosize = 16, .flags = UPF_BOOT_AUTOCONF, .line = 1, }, }, }; static struct platform_device pnx833x_uart_device = { .name = "pnx8xxx-uart", .id = -1, .dev = { .dma_mask = &uart_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = pnx8xxx_ports, }, .num_resources = ARRAY_SIZE(pnx833x_uart_resources), .resource = pnx833x_uart_resources, }; static u64 ehci_dmamask = DMA_BIT_MASK(32); static struct resource pnx833x_usb_ehci_resources[] = { [0] = { .start = PNX833X_USB_PORTS_START, .end = PNX833X_USB_PORTS_END, .flags = IORESOURCE_MEM, }, [1] = { .start = PNX833X_PIC_USB_INT, .end = PNX833X_PIC_USB_INT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device pnx833x_usb_ehci_device = { .name = "pnx833x-ehci", .id = -1, .dev = { .dma_mask = &ehci_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(pnx833x_usb_ehci_resources), .resource = pnx833x_usb_ehci_resources, }; #ifdef CONFIG_I2C_PNX0105 static struct resource pnx833x_i2c0_resources[] = { { .start = PNX833X_I2C0_PORTS_START, .end = PNX833X_I2C0_PORTS_END, .flags = IORESOURCE_MEM, }, { .start = PNX833X_PIC_I2C0_INT, .end = PNX833X_PIC_I2C0_INT, .flags = IORESOURCE_IRQ, }, }; static struct resource pnx833x_i2c1_resources[] = { { .start = PNX833X_I2C1_PORTS_START, .end = PNX833X_I2C1_PORTS_END, .flags = IORESOURCE_MEM, }, { .start = PNX833X_PIC_I2C1_INT, .end = PNX833X_PIC_I2C1_INT, .flags = IORESOURCE_IRQ, }, }; static struct i2c_pnx0105_dev pnx833x_i2c_dev[] = { { .base = PNX833X_I2C0_PORTS_START, .irq = -1, /* should be PNX833X_PIC_I2C0_INT but polling is 
faster */ .clock = 6, /* 0 == 400 kHz, 4 == 100 kHz(Maximum HDMI), 6 = 50kHz(Prefered HDCP) */ .bus_addr = 0, /* no slave support */ }, { .base = PNX833X_I2C1_PORTS_START, .irq = -1, /* on high freq, polling is faster */ /*.irq = PNX833X_PIC_I2C1_INT,*/ .clock = 4, /* 0 == 400 kHz, 4 == 100 kHz. 100 kHz seems a safe default for now */ .bus_addr = 0, /* no slave support */ }, }; static struct platform_device pnx833x_i2c0_device = { .name = "i2c-pnx0105", .id = 0, .dev = { .platform_data = &pnx833x_i2c_dev[0], }, .num_resources = ARRAY_SIZE(pnx833x_i2c0_resources), .resource = pnx833x_i2c0_resources, }; static struct platform_device pnx833x_i2c1_device = { .name = "i2c-pnx0105", .id = 1, .dev = { .platform_data = &pnx833x_i2c_dev[1], }, .num_resources = ARRAY_SIZE(pnx833x_i2c1_resources), .resource = pnx833x_i2c1_resources, }; #endif static u64 ethernet_dmamask = DMA_BIT_MASK(32); static struct resource pnx833x_ethernet_resources[] = { [0] = { .start = PNX8335_IP3902_PORTS_START, .end = PNX8335_IP3902_PORTS_END, .flags = IORESOURCE_MEM, }, [1] = { .start = PNX8335_PIC_ETHERNET_INT, .end = PNX8335_PIC_ETHERNET_INT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device pnx833x_ethernet_device = { .name = "ip3902-eth", .id = -1, .dev = { .dma_mask = &ethernet_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(pnx833x_ethernet_resources), .resource = pnx833x_ethernet_resources, }; static struct resource pnx833x_sata_resources[] = { [0] = { .start = PNX8335_SATA_PORTS_START, .end = PNX8335_SATA_PORTS_END, .flags = IORESOURCE_MEM, }, [1] = { .start = PNX8335_PIC_SATA_INT, .end = PNX8335_PIC_SATA_INT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device pnx833x_sata_device = { .name = "pnx833x-sata", .id = -1, .num_resources = ARRAY_SIZE(pnx833x_sata_resources), .resource = pnx833x_sata_resources, }; static const char *part_probes[] = { "cmdlinepart", NULL }; static void pnx833x_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, 
unsigned int ctrl) { struct nand_chip *this = mtd->priv; unsigned long nandaddr = (unsigned long)this->IO_ADDR_W; if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, (void __iomem *)(nandaddr + PNX8335_NAND_CLE_MASK)); else writeb(cmd, (void __iomem *)(nandaddr + PNX8335_NAND_ALE_MASK)); } static struct platform_nand_data pnx833x_flash_nand_data = { .chip = { .chip_delay = 25, .part_probe_types = part_probes, }, .ctrl = { .cmd_ctrl = pnx833x_flash_nand_cmd_ctrl } }; /* * Set start to be the correct address (PNX8335_NAND_BASE with no 0xb!!), * 12 bytes more seems to be the standard that allows for NAND access. */ static struct resource pnx833x_flash_nand_resource = { .start = PNX8335_NAND_BASE, .end = PNX8335_NAND_BASE + 12, .flags = IORESOURCE_MEM, }; static struct platform_device pnx833x_flash_nand = { .name = "gen_nand", .id = -1, .num_resources = 1, .resource = &pnx833x_flash_nand_resource, .dev = { .platform_data = &pnx833x_flash_nand_data, }, }; static struct platform_device *pnx833x_platform_devices[] __initdata = { &pnx833x_uart_device, &pnx833x_usb_ehci_device, #ifdef CONFIG_I2C_PNX0105 &pnx833x_i2c0_device, &pnx833x_i2c1_device, #endif &pnx833x_ethernet_device, &pnx833x_sata_device, &pnx833x_flash_nand, }; static int __init pnx833x_platform_init(void) { int res; res = platform_add_devices(pnx833x_platform_devices, ARRAY_SIZE(pnx833x_platform_devices)); return res; } arch_initcall(pnx833x_platform_init);
gpl-2.0
ikpb/android_kernel_blu_studio6lte
drivers/gpu/ion/ion_removed_heap.c
1629
9500
/* * drivers/gpu/ion/ion_removed_heap.c * * Copyright (C) 2011 Google, Inc. * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/spinlock.h> #include <linux/err.h> #include <linux/genalloc.h> #include <linux/io.h> #include <linux/ion.h> #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/seq_file.h> #include "ion_priv.h" #include <asm/mach/map.h> #include <asm/cacheflush.h> #include <linux/msm_ion.h> struct ion_removed_heap { struct ion_heap heap; struct gen_pool *pool; ion_phys_addr_t base; unsigned long allocated_bytes; unsigned long total_size; int (*request_region)(void *); int (*release_region)(void *); atomic_t map_count; void *bus_id; }; ion_phys_addr_t ion_removed_allocate(struct ion_heap *heap, unsigned long size, unsigned long align) { struct ion_removed_heap *removed_heap = container_of(heap, struct ion_removed_heap, heap); unsigned long offset = gen_pool_alloc_aligned(removed_heap->pool, size, ilog2(align)); if (!offset) { if ((removed_heap->total_size - removed_heap->allocated_bytes) >= size) pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. 
Memory is probably fragmented.", __func__, heap->name, removed_heap->total_size - removed_heap->allocated_bytes, size); return ION_CARVEOUT_ALLOCATE_FAIL; } removed_heap->allocated_bytes += size; return offset; } void ion_removed_free(struct ion_heap *heap, ion_phys_addr_t addr, unsigned long size) { struct ion_removed_heap *removed_heap = container_of(heap, struct ion_removed_heap, heap); if (addr == ION_CARVEOUT_ALLOCATE_FAIL) return; gen_pool_free(removed_heap->pool, addr, size); removed_heap->allocated_bytes -= size; } static int ion_removed_heap_phys(struct ion_heap *heap, struct ion_buffer *buffer, ion_phys_addr_t *addr, size_t *len) { *addr = buffer->priv_phys; *len = buffer->size; return 0; } static int ion_removed_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer, unsigned long size, unsigned long align, unsigned long flags) { buffer->priv_phys = ion_removed_allocate(heap, size, align); return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0; } static void ion_removed_heap_free(struct ion_buffer *buffer) { struct ion_heap *heap = buffer->heap; ion_removed_free(heap, buffer->priv_phys, buffer->size); buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL; } struct sg_table *ion_removed_heap_map_dma(struct ion_heap *heap, struct ion_buffer *buffer) { struct sg_table *table; int ret; table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!table) return ERR_PTR(-ENOMEM); ret = sg_alloc_table(table, 1, GFP_KERNEL); if (ret) goto err0; table->sgl->length = buffer->size; table->sgl->offset = 0; table->sgl->dma_address = buffer->priv_phys; return table; err0: kfree(table); return ERR_PTR(ret); } void ion_removed_heap_unmap_dma(struct ion_heap *heap, struct ion_buffer *buffer) { if (buffer->sg_table) sg_free_table(buffer->sg_table); kfree(buffer->sg_table); buffer->sg_table = 0; } static int ion_removed_request_region(struct ion_removed_heap *removed_heap) { int ret_value = 0; if (atomic_inc_return(&removed_heap->map_count) == 1) { if 
(removed_heap->request_region) { ret_value = removed_heap->request_region( removed_heap->bus_id); if (ret_value) { pr_err("Unable to request SMI region"); atomic_dec(&removed_heap->map_count); } } } return ret_value; } static int ion_removed_release_region(struct ion_removed_heap *removed_heap) { int ret_value = 0; if (atomic_dec_and_test(&removed_heap->map_count)) { if (removed_heap->release_region) { ret_value = removed_heap->release_region( removed_heap->bus_id); if (ret_value) pr_err("Unable to release SMI region"); } } return ret_value; } void *ion_removed_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer) { struct ion_removed_heap *removed_heap = container_of(heap, struct ion_removed_heap, heap); void *ret_value; if (ion_removed_request_region(removed_heap)) return NULL; if (ION_IS_CACHED(buffer->flags)) ret_value = ioremap_cached(buffer->priv_phys, buffer->size); else ret_value = ioremap(buffer->priv_phys, buffer->size); if (!ret_value) ion_removed_release_region(removed_heap); return ret_value; } void ion_removed_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer) { struct ion_removed_heap *removed_heap = container_of(heap, struct ion_removed_heap, heap); __arm_iounmap(buffer->vaddr); buffer->vaddr = NULL; ion_removed_release_region(removed_heap); return; } int ion_removed_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, struct vm_area_struct *vma) { struct ion_removed_heap *removed_heap = container_of(heap, struct ion_removed_heap, heap); int ret_value = 0; if (ion_removed_request_region(removed_heap)) return -EINVAL; if (!ION_IS_CACHED(buffer->flags)) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); ret_value = remap_pfn_range(vma, vma->vm_start, __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); if (ret_value) ion_removed_release_region(removed_heap); return ret_value; } void ion_removed_heap_unmap_user(struct ion_heap *heap, struct ion_buffer 
*buffer) { struct ion_removed_heap *removed_heap = container_of(heap, struct ion_removed_heap, heap); ion_removed_release_region(removed_heap); } static int ion_removed_print_debug(struct ion_heap *heap, struct seq_file *s, const struct list_head *mem_map) { struct ion_removed_heap *removed_heap = container_of(heap, struct ion_removed_heap, heap); seq_printf(s, "total bytes currently allocated: %lx\n", removed_heap->allocated_bytes); seq_printf(s, "total heap size: %lx\n", removed_heap->total_size); if (mem_map) { unsigned long base = removed_heap->base; unsigned long size = removed_heap->total_size; unsigned long end = base+size; unsigned long last_end = base; struct mem_map_data *data; seq_printf(s, "\nMemory Map\n"); seq_printf(s, "%16.s %14.s %14.s %14.s\n", "client", "start address", "end address", "size (hex)"); list_for_each_entry(data, mem_map, node) { const char *client_name = "(null)"; if (last_end < data->addr) { phys_addr_t da; da = data->addr-1; seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n", "FREE", &last_end, &da, data->addr-last_end, data->addr-last_end); } if (data->client_name) client_name = data->client_name; seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n", client_name, &data->addr, &data->addr_end, data->size, data->size); last_end = data->addr_end+1; } if (last_end < end) { seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE", last_end, end-1, end-last_end, end-last_end); } } return 0; } static struct ion_heap_ops removed_heap_ops = { .allocate = ion_removed_heap_allocate, .free = ion_removed_heap_free, .phys = ion_removed_heap_phys, .map_user = ion_removed_heap_map_user, .map_kernel = ion_removed_heap_map_kernel, .unmap_user = ion_removed_heap_unmap_user, .unmap_kernel = ion_removed_heap_unmap_kernel, .map_dma = ion_removed_heap_map_dma, .unmap_dma = ion_removed_heap_unmap_dma, .print_debug = ion_removed_print_debug, }; struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *heap_data) { struct ion_removed_heap *removed_heap; 
int ret; removed_heap = kzalloc(sizeof(struct ion_removed_heap), GFP_KERNEL); if (!removed_heap) return ERR_PTR(-ENOMEM); removed_heap->pool = gen_pool_create(12, -1); if (!removed_heap->pool) { kfree(removed_heap); return ERR_PTR(-ENOMEM); } removed_heap->base = heap_data->base; ret = gen_pool_add(removed_heap->pool, removed_heap->base, heap_data->size, -1); if (ret < 0) { gen_pool_destroy(removed_heap->pool); kfree(removed_heap); return ERR_PTR(-EINVAL); } removed_heap->heap.ops = &removed_heap_ops; removed_heap->heap.type = ION_HEAP_TYPE_REMOVED; removed_heap->allocated_bytes = 0; removed_heap->total_size = heap_data->size; if (heap_data->extra_data) { struct ion_co_heap_pdata *extra_data = heap_data->extra_data; if (extra_data->setup_region) removed_heap->bus_id = extra_data->setup_region(); if (extra_data->request_region) removed_heap->request_region = extra_data->request_region; if (extra_data->release_region) removed_heap->release_region = extra_data->release_region; } return &removed_heap->heap; } void ion_removed_heap_destroy(struct ion_heap *heap) { struct ion_removed_heap *removed_heap = container_of(heap, struct ion_removed_heap, heap); gen_pool_destroy(removed_heap->pool); kfree(removed_heap); removed_heap = NULL; }
gpl-2.0
AD5GB/android_kernel_googlesource-common
drivers/net/ethernet/intel/igb/e1000_nvm.c
2141
19257
/******************************************************************************* Intel(R) Gigabit Ethernet Linux driver Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include <linux/if_ether.h> #include <linux/delay.h> #include "e1000_mac.h" #include "e1000_nvm.h" /** * igb_raise_eec_clk - Raise EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * * Enable/Raise the EEPROM clock bit. **/ static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) { *eecd = *eecd | E1000_EECD_SK; wr32(E1000_EECD, *eecd); wrfl(); udelay(hw->nvm.delay_usec); } /** * igb_lower_eec_clk - Lower EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * * Clear/Lower the EEPROM clock bit. 
**/ static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) { *eecd = *eecd & ~E1000_EECD_SK; wr32(E1000_EECD, *eecd); wrfl(); udelay(hw->nvm.delay_usec); } /** * igb_shift_out_eec_bits - Shift data bits our to the EEPROM * @hw: pointer to the HW structure * @data: data to send to the EEPROM * @count: number of bits to shift out * * We need to shift 'count' bits out to the EEPROM. So, the value in the * "data" parameter will be shifted out to the EEPROM one bit at a time. * In order to do this, "data" must be broken down into bits. **/ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = rd32(E1000_EECD); u32 mask; mask = 0x01 << (count - 1); if (nvm->type == e1000_nvm_eeprom_spi) eecd |= E1000_EECD_DO; do { eecd &= ~E1000_EECD_DI; if (data & mask) eecd |= E1000_EECD_DI; wr32(E1000_EECD, eecd); wrfl(); udelay(nvm->delay_usec); igb_raise_eec_clk(hw, &eecd); igb_lower_eec_clk(hw, &eecd); mask >>= 1; } while (mask); eecd &= ~E1000_EECD_DI; wr32(E1000_EECD, eecd); } /** * igb_shift_in_eec_bits - Shift data bits in from the EEPROM * @hw: pointer to the HW structure * @count: number of bits to shift in * * In order to read a register from the EEPROM, we need to shift 'count' bits * in from the EEPROM. Bits are "shifted in" by raising the clock input to * the EEPROM (setting the SK bit), and then reading the value of the data out * "DO" bit. During this "shifting in" process the data in "DI" bit should * always be clear. 
**/ static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count) { u32 eecd; u32 i; u16 data; eecd = rd32(E1000_EECD); eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); data = 0; for (i = 0; i < count; i++) { data <<= 1; igb_raise_eec_clk(hw, &eecd); eecd = rd32(E1000_EECD); eecd &= ~E1000_EECD_DI; if (eecd & E1000_EECD_DO) data |= 1; igb_lower_eec_clk(hw, &eecd); } return data; } /** * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion * @hw: pointer to the HW structure * @ee_reg: EEPROM flag for polling * * Polls the EEPROM status bit for either read or write completion based * upon the value of 'ee_reg'. **/ static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) { u32 attempts = 100000; u32 i, reg = 0; s32 ret_val = -E1000_ERR_NVM; for (i = 0; i < attempts; i++) { if (ee_reg == E1000_NVM_POLL_READ) reg = rd32(E1000_EERD); else reg = rd32(E1000_EEWR); if (reg & E1000_NVM_RW_REG_DONE) { ret_val = 0; break; } udelay(5); } return ret_val; } /** * igb_acquire_nvm - Generic request for access to EEPROM * @hw: pointer to the HW structure * * Set the EEPROM access request bit and wait for EEPROM access grant bit. * Return successful if access grant bit set, else clear the request for * EEPROM access and return -E1000_ERR_NVM (-1). **/ s32 igb_acquire_nvm(struct e1000_hw *hw) { u32 eecd = rd32(E1000_EECD); s32 timeout = E1000_NVM_GRANT_ATTEMPTS; s32 ret_val = 0; wr32(E1000_EECD, eecd | E1000_EECD_REQ); eecd = rd32(E1000_EECD); while (timeout) { if (eecd & E1000_EECD_GNT) break; udelay(5); eecd = rd32(E1000_EECD); timeout--; } if (!timeout) { eecd &= ~E1000_EECD_REQ; wr32(E1000_EECD, eecd); hw_dbg("Could not acquire NVM grant\n"); ret_val = -E1000_ERR_NVM; } return ret_val; } /** * igb_standby_nvm - Return EEPROM to standby state * @hw: pointer to the HW structure * * Return the EEPROM to a standby state. 
**/ static void igb_standby_nvm(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = rd32(E1000_EECD); if (nvm->type == e1000_nvm_eeprom_spi) { /* Toggle CS to flush commands */ eecd |= E1000_EECD_CS; wr32(E1000_EECD, eecd); wrfl(); udelay(nvm->delay_usec); eecd &= ~E1000_EECD_CS; wr32(E1000_EECD, eecd); wrfl(); udelay(nvm->delay_usec); } } /** * e1000_stop_nvm - Terminate EEPROM command * @hw: pointer to the HW structure * * Terminates the current command by inverting the EEPROM's chip select pin. **/ static void e1000_stop_nvm(struct e1000_hw *hw) { u32 eecd; eecd = rd32(E1000_EECD); if (hw->nvm.type == e1000_nvm_eeprom_spi) { /* Pull CS high */ eecd |= E1000_EECD_CS; igb_lower_eec_clk(hw, &eecd); } } /** * igb_release_nvm - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit. **/ void igb_release_nvm(struct e1000_hw *hw) { u32 eecd; e1000_stop_nvm(hw); eecd = rd32(E1000_EECD); eecd &= ~E1000_EECD_REQ; wr32(E1000_EECD, eecd); } /** * igb_ready_nvm_eeprom - Prepares EEPROM for read/write * @hw: pointer to the HW structure * * Setups the EEPROM for reading and writing. **/ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = rd32(E1000_EECD); s32 ret_val = 0; u16 timeout = 0; u8 spi_stat_reg; if (nvm->type == e1000_nvm_eeprom_spi) { /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); wr32(E1000_EECD, eecd); wrfl(); udelay(1); timeout = NVM_MAX_RETRY_SPI; /* Read "Status Register" repeatedly until the LSB is cleared. * The EEPROM will signal that the command has been completed * by clearing bit 0 of the internal status register. If it's * not cleared within 'timeout', then error out. 
*/ while (timeout) { igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, hw->nvm.opcode_bits); spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8); if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) break; udelay(5); igb_standby_nvm(hw); timeout--; } if (!timeout) { hw_dbg("SPI NVM Status error\n"); ret_val = -E1000_ERR_NVM; goto out; } } out: return ret_val; } /** * igb_read_nvm_spi - Read EEPROM's using SPI * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read * @words: number of words to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM. **/ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; u32 i = 0; s32 ret_val; u16 word_in; u8 read_opcode = NVM_READ_OPCODE_SPI; /* A check for invalid values: offset too large, too many words, * and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { hw_dbg("nvm parameter(s) out of bounds\n"); ret_val = -E1000_ERR_NVM; goto out; } ret_val = nvm->ops.acquire(hw); if (ret_val) goto out; ret_val = igb_ready_nvm_eeprom(hw); if (ret_val) goto release; igb_standby_nvm(hw); if ((nvm->address_bits == 8) && (offset >= 128)) read_opcode |= NVM_A8_OPCODE_SPI; /* Send the READ command (opcode + addr) */ igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); /* Read the data. SPI NVMs increment the address with each byte * read and will roll over if reading beyond the end. 
This allows * us to read the whole NVM from any offset */ for (i = 0; i < words; i++) { word_in = igb_shift_in_eec_bits(hw, 16); data[i] = (word_in >> 8) | (word_in << 8); } release: nvm->ops.release(hw); out: return ret_val; } /** * igb_read_nvm_eerd - Reads EEPROM using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read * @words: number of words to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the EERD register. **/ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; u32 i, eerd = 0; s32 ret_val = 0; /* A check for invalid values: offset too large, too many words, * and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { hw_dbg("nvm parameter(s) out of bounds\n"); ret_val = -E1000_ERR_NVM; goto out; } for (i = 0; i < words; i++) { eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + E1000_NVM_RW_REG_START; wr32(E1000_EERD, eerd); ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); if (ret_val) break; data[i] = (rd32(E1000_EERD) >> E1000_NVM_RW_REG_DATA); } out: return ret_val; } /** * igb_write_nvm_spi - Write to EEPROM using SPI * @hw: pointer to the HW structure * @offset: offset within the EEPROM to be written to * @words: number of words to write * @data: 16 bit word(s) to be written to the EEPROM * * Writes data to EEPROM at offset using SPI interface. * * If e1000_update_nvm_checksum is not called after this function , the * EEPROM will most likley contain an invalid checksum. **/ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; s32 ret_val = -E1000_ERR_NVM; u16 widx = 0; /* A check for invalid values: offset too large, too many words, * and not enough words. 
*/ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { hw_dbg("nvm parameter(s) out of bounds\n"); return ret_val; } while (widx < words) { u8 write_opcode = NVM_WRITE_OPCODE_SPI; ret_val = nvm->ops.acquire(hw); if (ret_val) return ret_val; ret_val = igb_ready_nvm_eeprom(hw); if (ret_val) { nvm->ops.release(hw); return ret_val; } igb_standby_nvm(hw); /* Send the WRITE ENABLE command (8 bit opcode) */ igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, nvm->opcode_bits); igb_standby_nvm(hw); /* Some SPI eeproms use the 8th address bit embedded in the * opcode */ if ((nvm->address_bits == 8) && (offset >= 128)) write_opcode |= NVM_A8_OPCODE_SPI; /* Send the Write command (8-bit opcode + addr) */ igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), nvm->address_bits); /* Loop to allow for up to whole page write of eeprom */ while (widx < words) { u16 word_out = data[widx]; word_out = (word_out >> 8) | (word_out << 8); igb_shift_out_eec_bits(hw, word_out, 16); widx++; if ((((offset + widx) * 2) % nvm->page_size) == 0) { igb_standby_nvm(hw); break; } } usleep_range(1000, 2000); nvm->ops.release(hw); } return ret_val; } /** * igb_read_part_string - Read device part number * @hw: pointer to the HW structure * @part_num: pointer to device part number * @part_num_size: size of part number buffer * * Reads the product board assembly (PBA) number from the EEPROM and stores * the value in part_num. 
**/ s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size) { s32 ret_val; u16 nvm_data; u16 pointer; u16 offset; u16 length; if (part_num == NULL) { hw_dbg("PBA string buffer was null\n"); ret_val = E1000_ERR_INVALID_ARGUMENT; goto out; } ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; } ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; } /* if nvm_data is not ptr guard the PBA must be in legacy format which * means pointer is actually our second data word for the PBA number * and we can decode it into an ascii string */ if (nvm_data != NVM_PBA_PTR_GUARD) { hw_dbg("NVM PBA number is not stored as string\n"); /* we will need 11 characters to store the PBA */ if (part_num_size < 11) { hw_dbg("PBA string buffer too small\n"); return E1000_ERR_NO_SPACE; } /* extract hex string from data and pointer */ part_num[0] = (nvm_data >> 12) & 0xF; part_num[1] = (nvm_data >> 8) & 0xF; part_num[2] = (nvm_data >> 4) & 0xF; part_num[3] = nvm_data & 0xF; part_num[4] = (pointer >> 12) & 0xF; part_num[5] = (pointer >> 8) & 0xF; part_num[6] = '-'; part_num[7] = 0; part_num[8] = (pointer >> 4) & 0xF; part_num[9] = pointer & 0xF; /* put a null character on the end of our string */ part_num[10] = '\0'; /* switch all the data but the '-' to hex char */ for (offset = 0; offset < 10; offset++) { if (part_num[offset] < 0xA) part_num[offset] += '0'; else if (part_num[offset] < 0x10) part_num[offset] += 'A' - 0xA; } goto out; } ret_val = hw->nvm.ops.read(hw, pointer, 1, &length); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; } if (length == 0xFFFF || length == 0) { hw_dbg("NVM PBA number section invalid length\n"); ret_val = E1000_ERR_NVM_PBA_SECTION; goto out; } /* check if part_num buffer is big enough */ if (part_num_size < (((u32)length * 2) - 1)) { hw_dbg("PBA string buffer too small\n"); ret_val = E1000_ERR_NO_SPACE; goto 
out; } /* trim pba length from start of string */ pointer++; length--; for (offset = 0; offset < length; offset++) { ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; } part_num[offset * 2] = (u8)(nvm_data >> 8); part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); } part_num[offset * 2] = '\0'; out: return ret_val; } /** * igb_read_mac_addr - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. * Since devices with two ports use the same EEPROM, we increment the * last bit in the MAC address for the second port. **/ s32 igb_read_mac_addr(struct e1000_hw *hw) { u32 rar_high; u32 rar_low; u16 i; rar_high = rd32(E1000_RAH(0)); rar_low = rd32(E1000_RAL(0)); for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); for (i = 0; i < ETH_ALEN; i++) hw->mac.addr[i] = hw->mac.perm_addr[i]; return 0; } /** * igb_validate_nvm_checksum - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. **/ s32 igb_validate_nvm_checksum(struct e1000_hw *hw) { s32 ret_val = 0; u16 checksum = 0; u16 i, nvm_data; for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error\n"); goto out; } checksum += nvm_data; } if (checksum != (u16) NVM_SUM) { hw_dbg("NVM Checksum Invalid\n"); ret_val = -E1000_ERR_NVM; goto out; } out: return ret_val; } /** * igb_update_nvm_checksum - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM checksum by reading/adding each word of the EEPROM * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. 
**/ s32 igb_update_nvm_checksum(struct e1000_hw *hw) { s32 ret_val; u16 checksum = 0; u16 i, nvm_data; for (i = 0; i < NVM_CHECKSUM_REG; i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { hw_dbg("NVM Read Error while updating checksum.\n"); goto out; } checksum += nvm_data; } checksum = (u16) NVM_SUM - checksum; ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); if (ret_val) hw_dbg("NVM Write Error while updating checksum.\n"); out: return ret_val; } /** * igb_get_fw_version - Get firmware version information * @hw: pointer to the HW structure * @fw_vers: pointer to output structure * * unsupported MAC types will return all 0 version structure **/ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) { u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset; u16 fw_version; memset(fw_vers, 0, sizeof(struct e1000_fw_version)); switch (hw->mac.type) { case e1000_i211: igb_read_invm_version(hw, fw_vers); return; case e1000_82575: case e1000_82576: case e1000_82580: case e1000_i354: case e1000_i350: case e1000_i210: break; default: return; } /* basic eeprom version numbers */ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT; fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK); /* etrack id */ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl; switch (hw->mac.type) { case e1000_i210: case e1000_i354: case e1000_i350: /* find combo image version */ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) { hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + 1), 1, &comb_verh); hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), 1, &comb_verl); /* get Option Rom version if it exists and is valid */ if ((comb_verh && comb_verl) && 
((comb_verh != NVM_VER_INVALID) && (comb_verl != NVM_VER_INVALID))) { fw_vers->or_valid = true; fw_vers->or_major = comb_verl >> NVM_COMB_VER_SHFT; fw_vers->or_build = ((comb_verl << NVM_COMB_VER_SHFT) | (comb_verh >> NVM_COMB_VER_SHFT)); fw_vers->or_patch = comb_verh & NVM_COMB_VER_MASK; } } break; default: break; } return; }
gpl-2.0
caplio/valente_wx-ics
drivers/tty/serial/s3c2410.c
2397
3001
/* * Driver for Samsung S3C2410 SoC onboard UARTs. * * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <asm/irq.h> #include <mach/hardware.h> #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include "samsung.h" static int s3c2410_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *clk) { unsigned long ucon = rd_regl(port, S3C2410_UCON); if (strcmp(clk->name, "uclk") == 0) ucon |= S3C2410_UCON_UCLK; else ucon &= ~S3C2410_UCON_UCLK; wr_regl(port, S3C2410_UCON, ucon); return 0; } static int s3c2410_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *clk) { unsigned long ucon = rd_regl(port, S3C2410_UCON); clk->divisor = 1; clk->name = (ucon & S3C2410_UCON_UCLK) ? 
"uclk" : "pclk"; return 0; } static int s3c2410_serial_resetport(struct uart_port *port, struct s3c2410_uartcfg *cfg) { dbg("s3c2410_serial_resetport: port=%p (%08lx), cfg=%p\n", port, port->mapbase, cfg); wr_regl(port, S3C2410_UCON, cfg->ucon); wr_regl(port, S3C2410_ULCON, cfg->ulcon); /* reset both fifos */ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH); wr_regl(port, S3C2410_UFCON, cfg->ufcon); return 0; } static struct s3c24xx_uart_info s3c2410_uart_inf = { .name = "Samsung S3C2410 UART", .type = PORT_S3C2410, .fifosize = 16, .rx_fifomask = S3C2410_UFSTAT_RXMASK, .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT, .rx_fifofull = S3C2410_UFSTAT_RXFULL, .tx_fifofull = S3C2410_UFSTAT_TXFULL, .tx_fifomask = S3C2410_UFSTAT_TXMASK, .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT, .get_clksrc = s3c2410_serial_getsource, .set_clksrc = s3c2410_serial_setsource, .reset_port = s3c2410_serial_resetport, }; static int s3c2410_serial_probe(struct platform_device *dev) { return s3c24xx_serial_probe(dev, &s3c2410_uart_inf); } static struct platform_driver s3c2410_serial_driver = { .probe = s3c2410_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { .name = "s3c2410-uart", .owner = THIS_MODULE, }, }; s3c24xx_console_init(&s3c2410_serial_driver, &s3c2410_uart_inf); static int __init s3c2410_serial_init(void) { return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf); } static void __exit s3c2410_serial_exit(void) { platform_driver_unregister(&s3c2410_serial_driver); } module_init(s3c2410_serial_init); module_exit(s3c2410_serial_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("Samsung S3C2410 SoC Serial port driver"); MODULE_ALIAS("platform:s3c2410-uart");
gpl-2.0
wjb/mx-common
drivers/target/target_core_scdb.c
2653
2654
/******************************************************************************* * Filename: target_core_scdb.c * * This file contains the generic target engine Split CDB related functions. * * Copyright (c) 2004-2005 PyX Technologies, Inc. * Copyright (c) 2005, 2006, 2007 SBE, Inc. * Copyright (c) 2007-2010 Rising Tide Systems * Copyright (c) 2008-2010 Linux-iSCSI.org * * Nicholas A. Bellinger <nab@kernel.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* ******************************************************************************/ #include <linux/net.h> #include <linux/string.h> #include <scsi/scsi.h> #include <asm/unaligned.h> #include <target/target_core_base.h> #include <target/target_core_transport.h> #include "target_core_scdb.h" /* split_cdb_XX_6(): * * 21-bit LBA w/ 8-bit SECTORS */ void split_cdb_XX_6( unsigned long long lba, u32 *sectors, unsigned char *cdb) { cdb[1] = (lba >> 16) & 0x1f; cdb[2] = (lba >> 8) & 0xff; cdb[3] = lba & 0xff; cdb[4] = *sectors & 0xff; } /* split_cdb_XX_10(): * * 32-bit LBA w/ 16-bit SECTORS */ void split_cdb_XX_10( unsigned long long lba, u32 *sectors, unsigned char *cdb) { put_unaligned_be32(lba, &cdb[2]); put_unaligned_be16(*sectors, &cdb[7]); } /* split_cdb_XX_12(): * * 32-bit LBA w/ 32-bit SECTORS */ void split_cdb_XX_12( unsigned long long lba, u32 *sectors, unsigned char *cdb) { put_unaligned_be32(lba, &cdb[2]); put_unaligned_be32(*sectors, &cdb[6]); } /* split_cdb_XX_16(): * * 64-bit LBA w/ 32-bit SECTORS */ void split_cdb_XX_16( unsigned long long lba, u32 *sectors, unsigned char *cdb) { put_unaligned_be64(lba, &cdb[2]); put_unaligned_be32(*sectors, &cdb[10]); } /* * split_cdb_XX_32(): * * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32 */ void split_cdb_XX_32( unsigned long long lba, u32 *sectors, unsigned char *cdb) { put_unaligned_be64(lba, &cdb[12]); put_unaligned_be32(*sectors, &cdb[28]); }
gpl-2.0
kirananto/RAZOR_REDMI2
sound/core/memory.c
3165
2563
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 *  Misc memory accessors
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/export.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <sound/core.h>

/**
 * copy_to_user_fromio - copy data from mmio-space to user-space
 * @dst: the destination pointer on user-space
 * @src: the source pointer on mmio
 * @count: the data size to copy in bytes
 *
 * Copies the data from mmio-space to user-space, staging it through a
 * small on-stack bounce buffer on platforms where a direct copy is not
 * safe.
 *
 * Return: Zero if successful, or non-zero on failure.
 */
int copy_to_user_fromio(void __user *dst, const volatile void __iomem *src, size_t count)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
	/* on these platforms mmio is directly accessible like memory */
	return copy_to_user(dst, (const void __force*)src, count) ? -EFAULT : 0;
#else
	char bounce[256];

	while (count) {
		size_t chunk = count;

		if (chunk > sizeof(bounce))
			chunk = sizeof(bounce);
		memcpy_fromio(bounce, (void __iomem *)src, chunk);
		if (copy_to_user(dst, bounce, chunk))
			return -EFAULT;
		count -= chunk;
		dst += chunk;
		src += chunk;
	}
	return 0;
#endif
}
EXPORT_SYMBOL(copy_to_user_fromio);

/**
 * copy_from_user_toio - copy data from user-space to mmio-space
 * @dst: the destination pointer on mmio-space
 * @src: the source pointer on user-space
 * @count: the data size to copy in bytes
 *
 * Copies the data from user-space to mmio-space via the same bounce
 * buffer scheme as copy_to_user_fromio().
 *
 * Return: Zero if successful, or non-zero on failure.
 */
int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size_t count)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
	return copy_from_user((void __force *)dst, src, count) ? -EFAULT : 0;
#else
	char bounce[256];

	while (count) {
		size_t chunk = count;

		if (chunk > sizeof(bounce))
			chunk = sizeof(bounce);
		if (copy_from_user(bounce, src, chunk))
			return -EFAULT;
		memcpy_toio(dst, bounce, chunk);
		count -= chunk;
		dst += chunk;
		src += chunk;
	}
	return 0;
#endif
}
EXPORT_SYMBOL(copy_from_user_toio);
gpl-2.0
jcadduono/nethunter_kernel_noblelte
arch/mips/lib/dump_tlb.c
3421
2721
/* * Dump R4x00 TLB for debugging purposes. * * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle. * Copyright (C) 1999 by Silicon Graphics, Inc. */ #include <linux/kernel.h> #include <linux/mm.h> #include <asm/mipsregs.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlbdebug.h> static inline const char *msk2str(unsigned int mask) { switch (mask) { case PM_4K: return "4kb"; case PM_16K: return "16kb"; case PM_64K: return "64kb"; case PM_256K: return "256kb"; #ifdef CONFIG_CPU_CAVIUM_OCTEON case PM_8K: return "8kb"; case PM_32K: return "32kb"; case PM_128K: return "128kb"; case PM_512K: return "512kb"; case PM_2M: return "2Mb"; case PM_8M: return "8Mb"; case PM_32M: return "32Mb"; #endif #ifndef CONFIG_CPU_VR41XX case PM_1M: return "1Mb"; case PM_4M: return "4Mb"; case PM_16M: return "16Mb"; case PM_64M: return "64Mb"; case PM_256M: return "256Mb"; case PM_1G: return "1Gb"; #endif } return ""; } #define BARRIER() \ __asm__ __volatile__( \ ".set\tnoreorder\n\t" \ "nop;nop;nop;nop;nop;nop;nop\n\t" \ ".set\treorder"); static void dump_tlb(int first, int last) { unsigned long s_entryhi, entryhi, asid; unsigned long long entrylo0, entrylo1; unsigned int s_index, s_pagemask, pagemask, c0, c1, i; s_pagemask = read_c0_pagemask(); s_entryhi = read_c0_entryhi(); s_index = read_c0_index(); asid = s_entryhi & 0xff; for (i = first; i <= last; i++) { write_c0_index(i); BARRIER(); tlb_read(); BARRIER(); pagemask = read_c0_pagemask(); entryhi = read_c0_entryhi(); entrylo0 = read_c0_entrylo0(); entrylo1 = read_c0_entrylo1(); /* Unused entries have a virtual address of CKSEG0. 
*/ if ((entryhi & ~0x1ffffUL) != CKSEG0 && (entryhi & 0xff) == asid) { #ifdef CONFIG_32BIT int width = 8; #else int width = 11; #endif /* * Only print entries in use */ printk("Index: %2d pgmask=%s ", i, msk2str(pagemask)); c0 = (entrylo0 >> 3) & 7; c1 = (entrylo1 >> 3) & 7; printk("va=%0*lx asid=%02lx\n", width, (entryhi & ~0x1fffUL), entryhi & 0xff); printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", width, (entrylo0 << 6) & PAGE_MASK, c0, (entrylo0 & 4) ? 1 : 0, (entrylo0 & 2) ? 1 : 0, (entrylo0 & 1) ? 1 : 0); printk("[pa=%0*llx c=%d d=%d v=%d g=%d]\n", width, (entrylo1 << 6) & PAGE_MASK, c1, (entrylo1 & 4) ? 1 : 0, (entrylo1 & 2) ? 1 : 0, (entrylo1 & 1) ? 1 : 0); } } printk("\n"); write_c0_entryhi(s_entryhi); write_c0_index(s_index); write_c0_pagemask(s_pagemask); } void dump_tlb_all(void) { dump_tlb(0, current_cpu_data.tlbsize - 1); }
gpl-2.0
bigbiff/android_kernel_samsung_sm900p
drivers/gpu/drm/i915/intel_bios.c
3677
20378
/* * Copyright © 2006 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * Authors: * Eric Anholt <eric@anholt.net> * */ #include <linux/dmi.h> #include <drm/drm_dp_helper.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_bios.h" #define SLAVE_ADDR1 0x70 #define SLAVE_ADDR2 0x72 static int panel_type; static void * find_section(struct bdb_header *bdb, int section_id) { u8 *base = (u8 *)bdb; int index = 0; u16 total, current_size; u8 current_id; /* skip to first section */ index += bdb->header_size; total = bdb->bdb_size; /* walk the sections looking for section_id */ while (index < total) { current_id = *(base + index); index++; current_size = *((u16 *)(base + index)); index += 2; if (current_id == section_id) return base + index; index += current_size; } return NULL; } static u16 get_blocksize(void *p) { u16 *block_ptr, block_size; block_ptr = (u16 *)((char *)p - 2); block_size = *block_ptr; return block_size; } static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, const struct lvds_dvo_timing *dvo_timing) { panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | dvo_timing->hactive_lo; panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + dvo_timing->hsync_pulse_width; panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | dvo_timing->vactive_lo; panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + dvo_timing->vsync_off; panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + dvo_timing->vsync_pulse_width; panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); panel_fixed_mode->clock = dvo_timing->clock * 10; panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; if (dvo_timing->hsync_positive) panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; else 
panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; if (dvo_timing->vsync_positive) panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; /* Some VBTs have bogus h/vtotal values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; drm_mode_set_name(panel_fixed_mode); } static bool lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a, const struct lvds_dvo_timing *b) { if (a->hactive_hi != b->hactive_hi || a->hactive_lo != b->hactive_lo) return false; if (a->hsync_off_hi != b->hsync_off_hi || a->hsync_off_lo != b->hsync_off_lo) return false; if (a->hsync_pulse_width != b->hsync_pulse_width) return false; if (a->hblank_hi != b->hblank_hi || a->hblank_lo != b->hblank_lo) return false; if (a->vactive_hi != b->vactive_hi || a->vactive_lo != b->vactive_lo) return false; if (a->vsync_off != b->vsync_off) return false; if (a->vsync_pulse_width != b->vsync_pulse_width) return false; if (a->vblank_hi != b->vblank_hi || a->vblank_lo != b->vblank_lo) return false; return true; } static const struct lvds_dvo_timing * get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs, int index) { /* * the size of fp_timing varies on the different platform. 
* So calculate the DVO timing relative offset in LVDS data * entry to get the DVO timing entry */ int lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; int dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index; return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); } /* Try to find integrated panel data */ static void parse_lfp_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { const struct bdb_lvds_options *lvds_options; const struct bdb_lvds_lfp_data *lvds_lfp_data; const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; const struct lvds_dvo_timing *panel_dvo_timing; struct drm_display_mode *panel_fixed_mode; int i, downclock; lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); if (!lvds_options) return; dev_priv->lvds_dither = lvds_options->pixel_dither; if (lvds_options->panel_type == 0xff) return; panel_type = lvds_options->panel_type; lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); if (!lvds_lfp_data) return; lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS); if (!lvds_lfp_data_ptrs) return; dev_priv->lvds_vbt = 1; panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, lvds_options->panel_type); panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) return; fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing); dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); /* * Iterate over the LVDS panel timing info to find the lowest clock * for the native resolution. 
*/ downclock = panel_dvo_timing->clock; for (i = 0; i < 16; i++) { const struct lvds_dvo_timing *dvo_timing; dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, i); if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) && dvo_timing->clock < downclock) downclock = dvo_timing->clock; } if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) { dev_priv->lvds_downclock_avail = 1; dev_priv->lvds_downclock = downclock * 10; DRM_DEBUG_KMS("LVDS downclock is found in VBT. " "Normal Clock %dKHz, downclock %dKHz\n", panel_fixed_mode->clock, 10*downclock); } } /* Try to find sdvo panel data */ static void parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct lvds_dvo_timing *dvo_timing; struct drm_display_mode *panel_fixed_mode; int index; index = i915_vbt_sdvo_panel_type; if (index == -1) { struct bdb_sdvo_lvds_options *sdvo_lvds_options; sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); if (!sdvo_lvds_options) return; index = sdvo_lvds_options->panel_type; } dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); if (!dvo_timing) return; panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) return; fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); } static int intel_bios_ssc_frequency(struct drm_device *dev, bool alternate) { switch (INTEL_INFO(dev)->gen) { case 2: return alternate ? 66 : 48; case 3: case 4: return alternate ? 100 : 96; default: return alternate ? 
100 : 120; } } static void parse_general_features(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct drm_device *dev = dev_priv->dev; struct bdb_general_features *general; general = find_section(bdb, BDB_GENERAL_FEATURES); if (general) { dev_priv->int_tv_support = general->int_tv_support; dev_priv->int_crt_support = general->int_crt_support; dev_priv->lvds_use_ssc = general->enable_ssc; dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, general->ssc_freq); dev_priv->display_clock_mode = general->display_clock_mode; DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n", dev_priv->int_tv_support, dev_priv->int_crt_support, dev_priv->lvds_use_ssc, dev_priv->lvds_ssc_freq, dev_priv->display_clock_mode); } } static void parse_general_definitions(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_general_definitions *general; general = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (general) { u16 block_size = get_blocksize(general); if (block_size >= sizeof(*general)) { int bus_pin = general->crt_ddc_gmbus_pin; DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); if (bus_pin >= 1 && bus_pin <= 6) dev_priv->crt_ddc_pin = bus_pin; } else { DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", block_size); } } } static void parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct sdvo_device_mapping *p_mapping; struct bdb_general_definitions *p_defs; struct child_device_config *p_child; int i, child_device_num, count; u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); return; } /* judge whether the size of child device meets the requirements. 
* If the child device size obtained from general definition block * is different with sizeof(struct child_device_config), skip the * parsing of sdvo device info */ if (p_defs->child_dev_size != sizeof(*p_child)) { /* different child dev size . Ignore it */ DRM_DEBUG_KMS("different child size is found. Invalid.\n"); return; } /* get the block size of general definitions */ block_size = get_blocksize(p_defs); /* get the number of child device */ child_device_num = (block_size - sizeof(*p_defs)) / sizeof(*p_child); count = 0; for (i = 0; i < child_device_num; i++) { p_child = &(p_defs->devices[i]); if (!p_child->device_type) { /* skip the device block if device type is invalid */ continue; } if (p_child->slave_addr != SLAVE_ADDR1 && p_child->slave_addr != SLAVE_ADDR2) { /* * If the slave address is neither 0x70 nor 0x72, * it is not a SDVO device. Skip it. */ continue; } if (p_child->dvo_port != DEVICE_PORT_DVOB && p_child->dvo_port != DEVICE_PORT_DVOC) { /* skip the incorrect SDVO port */ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); continue; } DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" " %s port\n", p_child->slave_addr, (p_child->dvo_port == DEVICE_PORT_DVOB) ? "SDVOB" : "SDVOC"); p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); if (!p_mapping->initialized) { p_mapping->dvo_port = p_child->dvo_port; p_mapping->slave_addr = p_child->slave_addr; p_mapping->dvo_wiring = p_child->dvo_wiring; p_mapping->ddc_pin = p_child->ddc_pin; p_mapping->i2c_pin = p_child->i2c_pin; p_mapping->initialized = 1; DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", p_mapping->dvo_port, p_mapping->slave_addr, p_mapping->dvo_wiring, p_mapping->ddc_pin, p_mapping->i2c_pin); } else { DRM_DEBUG_KMS("Maybe one SDVO port is shared by " "two SDVO device.\n"); } if (p_child->slave2_addr) { /* Maybe this is a SDVO device with multiple inputs */ /* And the mapping info is not added */ DRM_DEBUG_KMS("there exists the slave2_addr. 
Maybe this" " is a SDVO device with multiple inputs.\n"); } count++; } if (!count) { /* No SDVO device info is found */ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n"); } return; } static void parse_driver_features(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct drm_device *dev = dev_priv->dev; struct bdb_driver_features *driver; driver = find_section(bdb, BDB_DRIVER_FEATURES); if (!driver) return; if (SUPPORTS_EDP(dev) && driver->lvds_config == BDB_DRIVER_FEATURE_EDP) dev_priv->edp.support = 1; if (driver->dual_frequency) dev_priv->render_reclock_avail = true; } static void parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_edp *edp; struct edp_power_seq *edp_pps; struct edp_link_params *edp_link_params; edp = find_section(bdb, BDB_EDP); if (!edp) { if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { DRM_DEBUG_KMS("No eDP BDB found but eDP panel " "supported, assume %dbpp panel color " "depth.\n", dev_priv->edp.bpp); } return; } switch ((edp->color_depth >> (panel_type * 2)) & 3) { case EDP_18BPP: dev_priv->edp.bpp = 18; break; case EDP_24BPP: dev_priv->edp.bpp = 24; break; case EDP_30BPP: dev_priv->edp.bpp = 30; break; } /* Get the eDP sequencing and link info */ edp_pps = &edp->power_seqs[panel_type]; edp_link_params = &edp->link_params[panel_type]; dev_priv->edp.pps = *edp_pps; dev_priv->edp.rate = edp_link_params->rate ? 
DP_LINK_BW_2_7 : DP_LINK_BW_1_62; switch (edp_link_params->lanes) { case 0: dev_priv->edp.lanes = 1; break; case 1: dev_priv->edp.lanes = 2; break; case 3: default: dev_priv->edp.lanes = 4; break; } switch (edp_link_params->preemphasis) { case 0: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; break; case 1: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; break; case 2: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; break; case 3: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; break; } switch (edp_link_params->vswing) { case 0: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; break; case 1: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; break; case 2: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; break; case 3: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; break; } } static void parse_device_mapping(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_general_definitions *p_defs; struct child_device_config *p_child, *child_dev_ptr; int i, child_device_num, count; u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } /* judge whether the size of child device meets the requirements. * If the child device size obtained from general definition block * is different with sizeof(struct child_device_config), skip the * parsing of sdvo device info */ if (p_defs->child_dev_size != sizeof(*p_child)) { /* different child dev size . Ignore it */ DRM_DEBUG_KMS("different child size is found. 
Invalid.\n"); return; } /* get the block size of general definitions */ block_size = get_blocksize(p_defs); /* get the number of child device */ child_device_num = (block_size - sizeof(*p_defs)) / sizeof(*p_child); count = 0; /* get the number of child device that is present */ for (i = 0; i < child_device_num; i++) { p_child = &(p_defs->devices[i]); if (!p_child->device_type) { /* skip the device block if device type is invalid */ continue; } count++; } if (!count) { DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); return; } dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL); if (!dev_priv->child_dev) { DRM_DEBUG_KMS("No memory space for child device\n"); return; } dev_priv->child_dev_num = count; count = 0; for (i = 0; i < child_device_num; i++) { p_child = &(p_defs->devices[i]); if (!p_child->device_type) { /* skip the device block if device type is invalid */ continue; } child_dev_ptr = dev_priv->child_dev + count; count++; memcpy((void *)child_dev_ptr, (void *)p_child, sizeof(*p_child)); } return; } static void init_vbt_defaults(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; /* LFP panel data */ dev_priv->lvds_dither = 1; dev_priv->lvds_vbt = 0; /* SDVO panel data */ dev_priv->sdvo_lvds_vbt_mode = NULL; /* general features */ dev_priv->int_tv_support = 1; dev_priv->int_crt_support = 1; /* Default to using SSC */ dev_priv->lvds_use_ssc = 1; dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); /* eDP data */ dev_priv->edp.bpp = 18; } static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) { DRM_DEBUG_KMS("Falling back to manually reading VBT from " "VBIOS ROM for %s\n", id->ident); return 1; } static const struct dmi_system_id intel_no_opregion_vbt[] = { { .callback = intel_no_opregion_vbt_callback, .ident = "ThinkCentre A57", .matches = { DMI_MATCH(DMI_SYS_VENDOR, 
"LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), }, }, { } }; /** * intel_parse_bios - find VBT and initialize settings from the BIOS * @dev: DRM device * * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers * to appropriate values. * * Returns 0 on success, nonzero on failure. */ bool intel_parse_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev->pdev; struct bdb_header *bdb = NULL; u8 __iomem *bios = NULL; init_vbt_defaults(dev_priv); /* XXX Should this validation be moved to intel_opregion.c? */ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) { struct vbt_header *vbt = dev_priv->opregion.vbt; if (memcmp(vbt->signature, "$VBT", 4) == 0) { DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", vbt->signature); bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); } else dev_priv->opregion.vbt = NULL; } if (bdb == NULL) { struct vbt_header *vbt = NULL; size_t size; int i; bios = pci_map_rom(pdev, &size); if (!bios) return -1; /* Scour memory looking for the VBT signature */ for (i = 0; i + 4 < size; i++) { if (!memcmp(bios + i, "$VBT", 4)) { vbt = (struct vbt_header *)(bios + i); break; } } if (!vbt) { DRM_DEBUG_DRIVER("VBT signature missing\n"); pci_unmap_rom(pdev, bios); return -1; } bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); } /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); parse_general_definitions(dev_priv, bdb); parse_lfp_panel_data(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb); parse_sdvo_device_mapping(dev_priv, bdb); parse_device_mapping(dev_priv, bdb); parse_driver_features(dev_priv, bdb); parse_edp(dev_priv, bdb); if (bios) pci_unmap_rom(pdev, bios); return 0; } /* Ensure that vital registers have been initialised, even if the BIOS * is absent or just failing to do its job. 
*/ void intel_setup_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Set the Panel Power On/Off timings if uninitialized. */ if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { /* Set T2 to 40ms and T5 to 200ms */ I915_WRITE(PP_ON_DELAYS, 0x019007d0); /* Set T3 to 35ms and Tx to 200ms */ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); } }
gpl-2.0
viaembedded/arm-soc
net/wireless/radiotap.c
3677
11978
/* * Radiotap parser * * Copyright 2007 Andy Green <andy@warmcat.com> * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Alternatively, this software may be distributed under the terms of BSD * license. * * See COPYING for more details. */ #include <linux/kernel.h> #include <linux/export.h> #include <net/cfg80211.h> #include <net/ieee80211_radiotap.h> #include <asm/unaligned.h> /* function prototypes and related defs are in include/net/cfg80211.h */ static const struct radiotap_align_size rtap_namespace_sizes[] = { [IEEE80211_RADIOTAP_TSFT] = { .align = 8, .size = 8, }, [IEEE80211_RADIOTAP_FLAGS] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_RATE] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_CHANNEL] = { .align = 2, .size = 4, }, [IEEE80211_RADIOTAP_FHSS] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DBM_ANTNOISE] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_LOCK_QUALITY] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_TX_ATTENUATION] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_DBM_TX_POWER] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_ANTENNA] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DB_ANTNOISE] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_RX_FLAGS] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, }, [IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, }, /* * add more here as they are defined in radiotap.h 
*/ }; static const struct ieee80211_radiotap_namespace radiotap_ns = { .n_bits = ARRAY_SIZE(rtap_namespace_sizes), .align_size = rtap_namespace_sizes, }; /** * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization * @iterator: radiotap_iterator to initialize * @radiotap_header: radiotap header to parse * @max_length: total length we can parse into (eg, whole packet length) * * Returns: 0 or a negative error code if there is a problem. * * This function initializes an opaque iterator struct which can then * be passed to ieee80211_radiotap_iterator_next() to visit every radiotap * argument which is present in the header. It knows about extended * present headers and handles them. * * How to use: * call __ieee80211_radiotap_iterator_init() to init a semi-opaque iterator * struct ieee80211_radiotap_iterator (no need to init the struct beforehand) * checking for a good 0 return code. Then loop calling * __ieee80211_radiotap_iterator_next()... it returns either 0, * -ENOENT if there are no more args to parse, or -EINVAL if there is a problem. * The iterator's @this_arg member points to the start of the argument * associated with the current argument index that is present, which can be * found in the iterator's @this_arg_index member. This arg index corresponds * to the IEEE80211_RADIOTAP_... defines. * * Radiotap header length: * You can find the CPU-endian total radiotap header length in * iterator->max_length after executing ieee80211_radiotap_iterator_init() * successfully. * * Alignment Gotcha: * You must take care when dereferencing iterator.this_arg * for multibyte types... the pointer is not aligned. Use * get_unaligned((type *)iterator.this_arg) to dereference * iterator.this_arg for type "type" safely on all arches. 
* * Example code: * See Documentation/networking/radiotap-headers.txt */ int ieee80211_radiotap_iterator_init( struct ieee80211_radiotap_iterator *iterator, struct ieee80211_radiotap_header *radiotap_header, int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) { /* check the radiotap header can actually be present */ if (max_length < sizeof(struct ieee80211_radiotap_header)) return -EINVAL; /* Linux only supports version 0 radiotap format */ if (radiotap_header->it_version) return -EINVAL; /* sanity check for allowed length and radiotap length field */ if (max_length < get_unaligned_le16(&radiotap_header->it_len)) return -EINVAL; iterator->_rtheader = radiotap_header; iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len); iterator->_arg_index = 0; iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present); iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header); iterator->_reset_on_ext = 0; iterator->_next_bitmap = &radiotap_header->it_present; iterator->_next_bitmap++; iterator->_vns = vns; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; /* find payload start allowing for extended bitmap(s) */ if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader + sizeof(uint32_t) > (unsigned long)iterator->_max_length) return -EINVAL; while (get_unaligned_le32(iterator->_arg) & (1 << IEEE80211_RADIOTAP_EXT)) { iterator->_arg += sizeof(uint32_t); /* * check for insanity where the present bitmaps * keep claiming to extend up to or even beyond the * stated radiotap header length */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader + sizeof(uint32_t) > (unsigned long)iterator->_max_length) return -EINVAL; } iterator->_arg += sizeof(uint32_t); /* * no need to check again for blowing past stated radiotap * header length, because ieee80211_radiotap_iterator_next * checks it before it is 
dereferenced */ } iterator->this_arg = iterator->_arg; /* we are all initialized happily */ return 0; } EXPORT_SYMBOL(ieee80211_radiotap_iterator_init); static void find_ns(struct ieee80211_radiotap_iterator *iterator, uint32_t oui, uint8_t subns) { int i; iterator->current_namespace = NULL; if (!iterator->_vns) return; for (i = 0; i < iterator->_vns->n_ns; i++) { if (iterator->_vns->ns[i].oui != oui) continue; if (iterator->_vns->ns[i].subns != subns) continue; iterator->current_namespace = &iterator->_vns->ns[i]; break; } } /** * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg * @iterator: radiotap_iterator to move to next arg (if any) * * Returns: 0 if there is an argument to handle, * -ENOENT if there are no more args or -EINVAL * if there is something else wrong. * * This function provides the next radiotap arg index (IEEE80211_RADIOTAP_*) * in @this_arg_index and sets @this_arg to point to the * payload for the field. It takes care of alignment handling and extended * present fields. @this_arg can be changed by the caller (eg, * incremented to move inside a compound argument like * IEEE80211_RADIOTAP_CHANNEL). The args pointed to are in * little-endian format whatever the endianess of your CPU. * * Alignment Gotcha: * You must take care when dereferencing iterator.this_arg * for multibyte types... the pointer is not aligned. Use * get_unaligned((type *)iterator.this_arg) to dereference * iterator.this_arg for type "type" safely on all arches. 
*/ int ieee80211_radiotap_iterator_next( struct ieee80211_radiotap_iterator *iterator) { while (1) { int hit = 0; int pad, align, size, subns; uint32_t oui; /* if no more EXT bits, that's it */ if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT && !(iterator->_bitmap_shifter & 1)) return -ENOENT; if (!(iterator->_bitmap_shifter & 1)) goto next_entry; /* arg not present */ /* get alignment/size of data */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: case IEEE80211_RADIOTAP_EXT: align = 1; size = 0; break; case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: align = 2; size = 6; break; default: if (!iterator->current_namespace || iterator->_arg_index >= iterator->current_namespace->n_bits) { if (iterator->current_namespace == &radiotap_ns) return -ENOENT; align = 0; } else { align = iterator->current_namespace->align_size[iterator->_arg_index].align; size = iterator->current_namespace->align_size[iterator->_arg_index].size; } if (!align) { /* skip all subsequent data */ iterator->_arg = iterator->_next_ns_data; /* give up on this namespace */ iterator->current_namespace = NULL; goto next_entry; } break; } /* * arg is present, account for alignment padding * * Note that these alignments are relative to the start * of the radiotap header. There is no guarantee * that the radiotap header itself is aligned on any * kind of boundary. * * The above is why get_unaligned() is used to dereference * multibyte elements from the radiotap area. 
*/ pad = ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader) & (align - 1); if (pad) iterator->_arg += align - pad; if (iterator->_arg_index % 32 == IEEE80211_RADIOTAP_VENDOR_NAMESPACE) { int vnslen; if ((unsigned long)iterator->_arg + size - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; oui = (*iterator->_arg << 16) | (*(iterator->_arg + 1) << 8) | *(iterator->_arg + 2); subns = *(iterator->_arg + 3); find_ns(iterator, oui, subns); vnslen = get_unaligned_le16(iterator->_arg + 4); iterator->_next_ns_data = iterator->_arg + size + vnslen; if (!iterator->current_namespace) size += vnslen; } /* * this is what we will return to user, but we need to * move on first so next call has something fresh to test */ iterator->this_arg_index = iterator->_arg_index; iterator->this_arg = iterator->_arg; iterator->this_arg_size = size; /* internally move on the size of this arg */ iterator->_arg += size; /* * check for insanity where we are given a bitmap that * claims to have more arg content than the length of the * radiotap section. We will normally end up equalling this * max_length on the last arg, never exceeding it. */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; /* these special ones are valid in each bitmap word */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: iterator->_reset_on_ext = 1; iterator->is_radiotap_ns = 0; /* * If parser didn't register this vendor * namespace with us, allow it to show it * as 'raw. Do do that, set argument index * to vendor namespace. 
*/ iterator->this_arg_index = IEEE80211_RADIOTAP_VENDOR_NAMESPACE; if (!iterator->current_namespace) hit = 1; goto next_entry; case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: iterator->_reset_on_ext = 1; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; goto next_entry; case IEEE80211_RADIOTAP_EXT: /* * bit 31 was set, there is more * -- move to next u32 bitmap */ iterator->_bitmap_shifter = get_unaligned_le32(iterator->_next_bitmap); iterator->_next_bitmap++; if (iterator->_reset_on_ext) iterator->_arg_index = 0; else iterator->_arg_index++; iterator->_reset_on_ext = 0; break; default: /* we've got a hit! */ hit = 1; next_entry: iterator->_bitmap_shifter >>= 1; iterator->_arg_index++; } /* if we found a valid arg earlier, return it now */ if (hit) return 0; } } EXPORT_SYMBOL(ieee80211_radiotap_iterator_next);
gpl-2.0
lukego/linux
net/wireless/radiotap.c
3677
11978
/* * Radiotap parser * * Copyright 2007 Andy Green <andy@warmcat.com> * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Alternatively, this software may be distributed under the terms of BSD * license. * * See COPYING for more details. */ #include <linux/kernel.h> #include <linux/export.h> #include <net/cfg80211.h> #include <net/ieee80211_radiotap.h> #include <asm/unaligned.h> /* function prototypes and related defs are in include/net/cfg80211.h */ static const struct radiotap_align_size rtap_namespace_sizes[] = { [IEEE80211_RADIOTAP_TSFT] = { .align = 8, .size = 8, }, [IEEE80211_RADIOTAP_FLAGS] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_RATE] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_CHANNEL] = { .align = 2, .size = 4, }, [IEEE80211_RADIOTAP_FHSS] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DBM_ANTNOISE] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_LOCK_QUALITY] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_TX_ATTENUATION] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_DBM_TX_POWER] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_ANTENNA] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DB_ANTNOISE] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_RX_FLAGS] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, }, [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, }, [IEEE80211_RADIOTAP_MCS] = { .align = 1, .size = 3, }, [IEEE80211_RADIOTAP_AMPDU_STATUS] = { .align = 4, .size = 8, }, /* * add more here as they are defined in radiotap.h 
*/ }; static const struct ieee80211_radiotap_namespace radiotap_ns = { .n_bits = ARRAY_SIZE(rtap_namespace_sizes), .align_size = rtap_namespace_sizes, }; /** * ieee80211_radiotap_iterator_init - radiotap parser iterator initialization * @iterator: radiotap_iterator to initialize * @radiotap_header: radiotap header to parse * @max_length: total length we can parse into (eg, whole packet length) * * Returns: 0 or a negative error code if there is a problem. * * This function initializes an opaque iterator struct which can then * be passed to ieee80211_radiotap_iterator_next() to visit every radiotap * argument which is present in the header. It knows about extended * present headers and handles them. * * How to use: * call __ieee80211_radiotap_iterator_init() to init a semi-opaque iterator * struct ieee80211_radiotap_iterator (no need to init the struct beforehand) * checking for a good 0 return code. Then loop calling * __ieee80211_radiotap_iterator_next()... it returns either 0, * -ENOENT if there are no more args to parse, or -EINVAL if there is a problem. * The iterator's @this_arg member points to the start of the argument * associated with the current argument index that is present, which can be * found in the iterator's @this_arg_index member. This arg index corresponds * to the IEEE80211_RADIOTAP_... defines. * * Radiotap header length: * You can find the CPU-endian total radiotap header length in * iterator->max_length after executing ieee80211_radiotap_iterator_init() * successfully. * * Alignment Gotcha: * You must take care when dereferencing iterator.this_arg * for multibyte types... the pointer is not aligned. Use * get_unaligned((type *)iterator.this_arg) to dereference * iterator.this_arg for type "type" safely on all arches. 
* * Example code: * See Documentation/networking/radiotap-headers.txt */ int ieee80211_radiotap_iterator_init( struct ieee80211_radiotap_iterator *iterator, struct ieee80211_radiotap_header *radiotap_header, int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) { /* check the radiotap header can actually be present */ if (max_length < sizeof(struct ieee80211_radiotap_header)) return -EINVAL; /* Linux only supports version 0 radiotap format */ if (radiotap_header->it_version) return -EINVAL; /* sanity check for allowed length and radiotap length field */ if (max_length < get_unaligned_le16(&radiotap_header->it_len)) return -EINVAL; iterator->_rtheader = radiotap_header; iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len); iterator->_arg_index = 0; iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present); iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header); iterator->_reset_on_ext = 0; iterator->_next_bitmap = &radiotap_header->it_present; iterator->_next_bitmap++; iterator->_vns = vns; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; /* find payload start allowing for extended bitmap(s) */ if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader + sizeof(uint32_t) > (unsigned long)iterator->_max_length) return -EINVAL; while (get_unaligned_le32(iterator->_arg) & (1 << IEEE80211_RADIOTAP_EXT)) { iterator->_arg += sizeof(uint32_t); /* * check for insanity where the present bitmaps * keep claiming to extend up to or even beyond the * stated radiotap header length */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader + sizeof(uint32_t) > (unsigned long)iterator->_max_length) return -EINVAL; } iterator->_arg += sizeof(uint32_t); /* * no need to check again for blowing past stated radiotap * header length, because ieee80211_radiotap_iterator_next * checks it before it is 
dereferenced */ } iterator->this_arg = iterator->_arg; /* we are all initialized happily */ return 0; } EXPORT_SYMBOL(ieee80211_radiotap_iterator_init); static void find_ns(struct ieee80211_radiotap_iterator *iterator, uint32_t oui, uint8_t subns) { int i; iterator->current_namespace = NULL; if (!iterator->_vns) return; for (i = 0; i < iterator->_vns->n_ns; i++) { if (iterator->_vns->ns[i].oui != oui) continue; if (iterator->_vns->ns[i].subns != subns) continue; iterator->current_namespace = &iterator->_vns->ns[i]; break; } } /** * ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg * @iterator: radiotap_iterator to move to next arg (if any) * * Returns: 0 if there is an argument to handle, * -ENOENT if there are no more args or -EINVAL * if there is something else wrong. * * This function provides the next radiotap arg index (IEEE80211_RADIOTAP_*) * in @this_arg_index and sets @this_arg to point to the * payload for the field. It takes care of alignment handling and extended * present fields. @this_arg can be changed by the caller (eg, * incremented to move inside a compound argument like * IEEE80211_RADIOTAP_CHANNEL). The args pointed to are in * little-endian format whatever the endianess of your CPU. * * Alignment Gotcha: * You must take care when dereferencing iterator.this_arg * for multibyte types... the pointer is not aligned. Use * get_unaligned((type *)iterator.this_arg) to dereference * iterator.this_arg for type "type" safely on all arches. 
*/ int ieee80211_radiotap_iterator_next( struct ieee80211_radiotap_iterator *iterator) { while (1) { int hit = 0; int pad, align, size, subns; uint32_t oui; /* if no more EXT bits, that's it */ if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT && !(iterator->_bitmap_shifter & 1)) return -ENOENT; if (!(iterator->_bitmap_shifter & 1)) goto next_entry; /* arg not present */ /* get alignment/size of data */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: case IEEE80211_RADIOTAP_EXT: align = 1; size = 0; break; case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: align = 2; size = 6; break; default: if (!iterator->current_namespace || iterator->_arg_index >= iterator->current_namespace->n_bits) { if (iterator->current_namespace == &radiotap_ns) return -ENOENT; align = 0; } else { align = iterator->current_namespace->align_size[iterator->_arg_index].align; size = iterator->current_namespace->align_size[iterator->_arg_index].size; } if (!align) { /* skip all subsequent data */ iterator->_arg = iterator->_next_ns_data; /* give up on this namespace */ iterator->current_namespace = NULL; goto next_entry; } break; } /* * arg is present, account for alignment padding * * Note that these alignments are relative to the start * of the radiotap header. There is no guarantee * that the radiotap header itself is aligned on any * kind of boundary. * * The above is why get_unaligned() is used to dereference * multibyte elements from the radiotap area. 
*/ pad = ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader) & (align - 1); if (pad) iterator->_arg += align - pad; if (iterator->_arg_index % 32 == IEEE80211_RADIOTAP_VENDOR_NAMESPACE) { int vnslen; if ((unsigned long)iterator->_arg + size - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; oui = (*iterator->_arg << 16) | (*(iterator->_arg + 1) << 8) | *(iterator->_arg + 2); subns = *(iterator->_arg + 3); find_ns(iterator, oui, subns); vnslen = get_unaligned_le16(iterator->_arg + 4); iterator->_next_ns_data = iterator->_arg + size + vnslen; if (!iterator->current_namespace) size += vnslen; } /* * this is what we will return to user, but we need to * move on first so next call has something fresh to test */ iterator->this_arg_index = iterator->_arg_index; iterator->this_arg = iterator->_arg; iterator->this_arg_size = size; /* internally move on the size of this arg */ iterator->_arg += size; /* * check for insanity where we are given a bitmap that * claims to have more arg content than the length of the * radiotap section. We will normally end up equalling this * max_length on the last arg, never exceeding it. */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; /* these special ones are valid in each bitmap word */ switch (iterator->_arg_index % 32) { case IEEE80211_RADIOTAP_VENDOR_NAMESPACE: iterator->_reset_on_ext = 1; iterator->is_radiotap_ns = 0; /* * If parser didn't register this vendor * namespace with us, allow it to show it * as 'raw. Do do that, set argument index * to vendor namespace. 
*/ iterator->this_arg_index = IEEE80211_RADIOTAP_VENDOR_NAMESPACE; if (!iterator->current_namespace) hit = 1; goto next_entry; case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE: iterator->_reset_on_ext = 1; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; goto next_entry; case IEEE80211_RADIOTAP_EXT: /* * bit 31 was set, there is more * -- move to next u32 bitmap */ iterator->_bitmap_shifter = get_unaligned_le32(iterator->_next_bitmap); iterator->_next_bitmap++; if (iterator->_reset_on_ext) iterator->_arg_index = 0; else iterator->_arg_index++; iterator->_reset_on_ext = 0; break; default: /* we've got a hit! */ hit = 1; next_entry: iterator->_bitmap_shifter >>= 1; iterator->_arg_index++; } /* if we found a valid arg earlier, return it now */ if (hit) return 0; } } EXPORT_SYMBOL(ieee80211_radiotap_iterator_next);
gpl-2.0
Sudokamikaze/XKernel-grouper
sound/isa/es1688/es1688.c
3933
10476
/* * Driver for generic ESS AudioDrive ESx688 soundcards * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/isapnp.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/moduleparam.h> #include <asm/dma.h> #include <sound/core.h> #include <sound/es1688.h> #include <sound/mpu401.h> #include <sound/opl3.h> #define SNDRV_LEGACY_FIND_FREE_IRQ #define SNDRV_LEGACY_FIND_FREE_DMA #include <sound/initval.h> #define CRD_NAME "Generic ESS ES1688/ES688 AudioDrive" #define DEV_NAME "es1688" MODULE_DESCRIPTION(CRD_NAME); MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ESS,ES688 PnP AudioDrive,pnp:ESS0100}," "{ESS,ES1688 PnP AudioDrive,pnp:ESS0102}," "{ESS,ES688 AudioDrive,pnp:ESS6881}," "{ESS,ES1688 AudioDrive,pnp:ESS1681}}"); MODULE_ALIAS("snd_es968"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ #ifdef CONFIG_PNP static int isapnp[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; #endif static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x220,0x240,0x260 */ static 
long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* Usually 0x388 */ static long mpu_port[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -1}; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,10 */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,10 */ static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3 */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); #ifdef CONFIG_PNP module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard."); #endif MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard."); module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port # for " CRD_NAME " driver."); module_param_array(irq, int, NULL, 0444); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM port # for ES1688 driver."); MODULE_PARM_DESC(irq, "IRQ # for " CRD_NAME " driver."); module_param_array(mpu_irq, int, NULL, 0444); MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for " CRD_NAME " driver."); module_param_array(dma8, int, NULL, 0444); MODULE_PARM_DESC(dma8, "8-bit DMA # for " CRD_NAME " driver."); #ifdef CONFIG_PNP #define is_isapnp_selected(dev) isapnp[dev] #else #define is_isapnp_selected(dev) 0 #endif static int __devinit snd_es1688_match(struct device *dev, unsigned int n) { return enable[n] && !is_isapnp_selected(n); } static int __devinit snd_es1688_legacy_create(struct snd_card *card, struct device *dev, unsigned int n) { struct snd_es1688 *chip = card->private_data; static long possible_ports[] = {0x220, 0x240, 0x260}; static int possible_irqs[] = {5, 9, 10, 7, -1}; static int possible_dmas[] = {1, 3, 0, -1}; int i, error; if (irq[n] 
== SNDRV_AUTO_IRQ) { irq[n] = snd_legacy_find_free_irq(possible_irqs); if (irq[n] < 0) { dev_err(dev, "unable to find a free IRQ\n"); return -EBUSY; } } if (dma8[n] == SNDRV_AUTO_DMA) { dma8[n] = snd_legacy_find_free_dma(possible_dmas); if (dma8[n] < 0) { dev_err(dev, "unable to find a free DMA\n"); return -EBUSY; } } if (port[n] != SNDRV_AUTO_PORT) return snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); i = 0; do { port[n] = possible_ports[i]; error = snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); } while (error < 0 && ++i < ARRAY_SIZE(possible_ports)); return error; } static int __devinit snd_es1688_probe(struct snd_card *card, unsigned int n) { struct snd_es1688 *chip = card->private_data; struct snd_opl3 *opl3; struct snd_pcm *pcm; int error; error = snd_es1688_pcm(card, chip, 0, &pcm); if (error < 0) return error; error = snd_es1688_mixer(card, chip); if (error < 0) return error; strlcpy(card->driver, "ES1688", sizeof(card->driver)); strlcpy(card->shortname, pcm->name, sizeof(card->shortname)); snprintf(card->longname, sizeof(card->longname), "%s at 0x%lx, irq %i, dma %i", pcm->name, chip->port, chip->irq, chip->dma8); if (fm_port[n] == SNDRV_AUTO_PORT) fm_port[n] = port[n]; /* share the same port */ if (fm_port[n] > 0) { if (snd_opl3_create(card, fm_port[n], fm_port[n] + 2, OPL3_HW_OPL3, 0, &opl3) < 0) dev_warn(card->dev, "opl3 not detected at 0x%lx\n", fm_port[n]); else { error = snd_opl3_hwdep_new(opl3, 0, 1, NULL); if (error < 0) return error; } } if (mpu_irq[n] >= 0 && mpu_irq[n] != SNDRV_AUTO_IRQ && chip->mpu_port > 0) { error = snd_mpu401_uart_new(card, 0, MPU401_HW_ES1688, chip->mpu_port, 0, mpu_irq[n], IRQF_DISABLED, NULL); if (error < 0) return error; } return snd_card_register(card); } static int __devinit snd_es1688_isa_probe(struct device *dev, unsigned int n) { struct snd_card *card; int error; error = snd_card_create(index[n], id[n], THIS_MODULE, 
sizeof(struct snd_es1688), &card); if (error < 0) return error; error = snd_es1688_legacy_create(card, dev, n); if (error < 0) goto out; snd_card_set_dev(card, dev); error = snd_es1688_probe(card, n); if (error < 0) goto out; dev_set_drvdata(dev, card); return 0; out: snd_card_free(card); return error; } static int __devexit snd_es1688_isa_remove(struct device *dev, unsigned int n) { snd_card_free(dev_get_drvdata(dev)); dev_set_drvdata(dev, NULL); return 0; } static struct isa_driver snd_es1688_driver = { .match = snd_es1688_match, .probe = snd_es1688_isa_probe, .remove = __devexit_p(snd_es1688_isa_remove), #if 0 /* FIXME */ .suspend = snd_es1688_suspend, .resume = snd_es1688_resume, #endif .driver = { .name = DEV_NAME } }; static int snd_es968_pnp_is_probed; #ifdef CONFIG_PNP static int __devinit snd_card_es968_pnp(struct snd_card *card, unsigned int n, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { struct snd_es1688 *chip = card->private_data; struct pnp_dev *pdev; int error; pdev = pnp_request_card_device(pcard, pid->devs[0].id, NULL); if (pdev == NULL) return -ENODEV; error = pnp_activate_dev(pdev); if (error < 0) { snd_printk(KERN_ERR "ES968 pnp configure failure\n"); return error; } port[n] = pnp_port_start(pdev, 0); dma8[n] = pnp_dma(pdev, 0); irq[n] = pnp_irq(pdev, 0); return snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); } static int __devinit snd_es968_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { struct snd_card *card; static unsigned int dev; int error; struct snd_es1688 *chip; if (snd_es968_pnp_is_probed) return -EBUSY; for ( ; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev == SNDRV_CARDS) return -ENODEV; error = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_es1688), &card); if (error < 0) return error; chip = card->private_data; error = snd_card_es968_pnp(card, dev, pcard, pid); if (error < 0) { 
snd_card_free(card); return error; } snd_card_set_dev(card, &pcard->card->dev); error = snd_es1688_probe(card, dev); if (error < 0) return error; pnp_set_card_drvdata(pcard, card); snd_es968_pnp_is_probed = 1; return 0; } static void __devexit snd_es968_pnp_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); snd_es968_pnp_is_probed = 0; } #ifdef CONFIG_PM static int snd_es968_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_es1688 *chip = card->private_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); return 0; } static int snd_es968_pnp_resume(struct pnp_card_link *pcard) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_es1688 *chip = card->private_data; snd_es1688_reset(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct pnp_card_device_id snd_es968_pnpids[] = { { .id = "ESS0968", .devs = { { "@@@0968" }, } }, { .id = "ESS0968", .devs = { { "ESS0968" }, } }, { .id = "", } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, snd_es968_pnpids); static struct pnp_card_driver es968_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = DEV_NAME " PnP", .id_table = snd_es968_pnpids, .probe = snd_es968_pnp_detect, .remove = __devexit_p(snd_es968_pnp_remove), #ifdef CONFIG_PM .suspend = snd_es968_pnp_suspend, .resume = snd_es968_pnp_resume, #endif }; #endif static int __init alsa_card_es1688_init(void) { #ifdef CONFIG_PNP pnp_register_card_driver(&es968_pnpc_driver); if (snd_es968_pnp_is_probed) return 0; pnp_unregister_card_driver(&es968_pnpc_driver); #endif return isa_register_driver(&snd_es1688_driver, SNDRV_CARDS); } static void __exit alsa_card_es1688_exit(void) { if (!snd_es968_pnp_is_probed) { isa_unregister_driver(&snd_es1688_driver); return; } #ifdef CONFIG_PNP pnp_unregister_card_driver(&es968_pnpc_driver); #endif } 
/* Register the ES1688 driver's module entry/exit points with the kernel:
 * alsa_card_es1688_init() probes via PnP first and falls back to the ISA
 * bus driver; alsa_card_es1688_exit() unwinds whichever path succeeded. */
module_init(alsa_card_es1688_init);
module_exit(alsa_card_es1688_exit);
gpl-2.0
SteadyQuad/android_kernel_yotaphone2
arch/arm/mach-omap2/omap-hotplug.c
4445
1491
/* * OMAP4 SMP cpu-hotplug support * * Copyright (C) 2010 Texas Instruments, Inc. * Author: * Santosh Shilimkar <santosh.shilimkar@ti.com> * * Platform file needed for the OMAP4 SMP. This file is based on arm * realview smp platform. * Copyright (c) 2002 ARM Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <asm/cacheflush.h> #include "common.h" #include "powerdomain.h" int platform_cpu_kill(unsigned int cpu) { return 1; } /* * platform-specific code to shutdown a CPU * Called with IRQs disabled */ void __ref platform_cpu_die(unsigned int cpu) { unsigned int this_cpu; flush_cache_all(); dsb(); /* * we're ready for shutdown now, so do it */ if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0) pr_err("Secure clear status failed\n"); for (;;) { /* * Enter into low power state */ omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF); this_cpu = smp_processor_id(); if (omap_read_auxcoreboot0() == this_cpu) { /* * OK, proper wakeup, we're done */ break; } pr_debug("CPU%u: spurious wakeup call\n", cpu); } } int platform_cpu_disable(unsigned int cpu) { /* * we don't allow CPU 0 to be shutdown (it is still too special * e.g. clock tick interrupts) */ return cpu == 0 ? -EPERM : 0; }
gpl-2.0
Validus-Lollipop/android_kernel_motorola_msm8960dt-common
arch/arm/mach-pxa/cm-x2xx.c
4957
12036
/* * linux/arch/arm/mach-pxa/cm-x2xx.c * * Copyright (C) 2008 CompuLab, Ltd. * Mike Rapoport <mike@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/platform_device.h> #include <linux/syscore_ops.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/dm9000.h> #include <linux/leds.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #include <asm/mach/map.h> #include <mach/pxa25x.h> #include <mach/pxa27x.h> #include <mach/audio.h> #include <mach/pxafb.h> #include <mach/smemc.h> #include <asm/hardware/it8152.h> #include "generic.h" #include "cm-x2xx-pci.h" extern void cmx255_init(void); extern void cmx270_init(void); /* reserve IRQs for IT8152 */ #define CMX2XX_NR_IRQS (IRQ_BOARD_START + 40) /* virtual addresses for statically mapped regions */ #define CMX2XX_VIRT_BASE (void __iomem *)(0xe8000000) #define CMX2XX_IT8152_VIRT (CMX2XX_VIRT_BASE) /* physical address if local-bus attached devices */ #define CMX255_DM9000_PHYS_BASE (PXA_CS1_PHYS + (8 << 22)) #define CMX270_DM9000_PHYS_BASE (PXA_CS1_PHYS + (6 << 22)) /* leds */ #define CMX255_GPIO_RED (27) #define CMX255_GPIO_GREEN (32) #define CMX270_GPIO_RED (93) #define CMX270_GPIO_GREEN (94) /* GPIO IRQ usage */ #define GPIO22_ETHIRQ (22) #define GPIO10_ETHIRQ (10) #define CMX255_GPIO_IT8152_IRQ (0) #define CMX270_GPIO_IT8152_IRQ (22) #define CMX255_ETHIRQ PXA_GPIO_TO_IRQ(GPIO22_ETHIRQ) #define CMX270_ETHIRQ PXA_GPIO_TO_IRQ(GPIO10_ETHIRQ) #if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) static struct resource cmx255_dm9000_resource[] = { [0] = { .start = CMX255_DM9000_PHYS_BASE, .end = CMX255_DM9000_PHYS_BASE + 3, .flags = IORESOURCE_MEM, }, [1] = { .start = CMX255_DM9000_PHYS_BASE + 4, .end = CMX255_DM9000_PHYS_BASE + 4 + 500, .flags = IORESOURCE_MEM, }, [2] = { .start = CMX255_ETHIRQ, .end = CMX255_ETHIRQ, .flags = 
IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct resource cmx270_dm9000_resource[] = { [0] = { .start = CMX270_DM9000_PHYS_BASE, .end = CMX270_DM9000_PHYS_BASE + 3, .flags = IORESOURCE_MEM, }, [1] = { .start = CMX270_DM9000_PHYS_BASE + 8, .end = CMX270_DM9000_PHYS_BASE + 8 + 500, .flags = IORESOURCE_MEM, }, [2] = { .start = CMX270_ETHIRQ, .end = CMX270_ETHIRQ, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct dm9000_plat_data cmx270_dm9000_platdata = { .flags = DM9000_PLATF_32BITONLY | DM9000_PLATF_NO_EEPROM, }; static struct platform_device cmx2xx_dm9000_device = { .name = "dm9000", .id = 0, .num_resources = ARRAY_SIZE(cmx270_dm9000_resource), .dev = { .platform_data = &cmx270_dm9000_platdata, } }; static void __init cmx2xx_init_dm9000(void) { if (cpu_is_pxa25x()) cmx2xx_dm9000_device.resource = cmx255_dm9000_resource; else cmx2xx_dm9000_device.resource = cmx270_dm9000_resource; platform_device_register(&cmx2xx_dm9000_device); } #else static inline void cmx2xx_init_dm9000(void) {} #endif /* UCB1400 touchscreen controller */ #if defined(CONFIG_TOUCHSCREEN_UCB1400) || defined(CONFIG_TOUCHSCREEN_UCB1400_MODULE) static struct platform_device cmx2xx_ts_device = { .name = "ucb1400_core", .id = -1, }; static void __init cmx2xx_init_touchscreen(void) { platform_device_register(&cmx2xx_ts_device); } #else static inline void cmx2xx_init_touchscreen(void) {} #endif /* CM-X270 LEDs */ #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) static struct gpio_led cmx2xx_leds[] = { [0] = { .name = "cm-x2xx:red", .default_trigger = "nand-disk", .active_low = 1, }, [1] = { .name = "cm-x2xx:green", .default_trigger = "heartbeat", .active_low = 1, }, }; static struct gpio_led_platform_data cmx2xx_gpio_led_pdata = { .num_leds = ARRAY_SIZE(cmx2xx_leds), .leds = cmx2xx_leds, }; static struct platform_device cmx2xx_led_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &cmx2xx_gpio_led_pdata, }, }; static void __init 
cmx2xx_init_leds(void) { if (cpu_is_pxa25x()) { cmx2xx_leds[0].gpio = CMX255_GPIO_RED; cmx2xx_leds[1].gpio = CMX255_GPIO_GREEN; } else { cmx2xx_leds[0].gpio = CMX270_GPIO_RED; cmx2xx_leds[1].gpio = CMX270_GPIO_GREEN; } platform_device_register(&cmx2xx_led_device); } #else static inline void cmx2xx_init_leds(void) {} #endif #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) /* Display definitions keep these for backwards compatibility, although symbolic names (as e.g. in lpd270.c) looks better */ #define MTYPE_STN320x240 0 #define MTYPE_TFT640x480 1 #define MTYPE_CRT640x480 2 #define MTYPE_CRT800x600 3 #define MTYPE_TFT320x240 6 #define MTYPE_STN640x480 7 static struct pxafb_mode_info generic_stn_320x240_mode = { .pixclock = 76923, .bpp = 8, .xres = 320, .yres = 240, .hsync_len = 3, .vsync_len = 2, .left_margin = 3, .upper_margin = 0, .right_margin = 3, .lower_margin = 0, .sync = (FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT), .cmap_greyscale = 0, }; static struct pxafb_mach_info generic_stn_320x240 = { .modes = &generic_stn_320x240_mode, .num_modes = 1, .lcd_conn = LCD_COLOR_STN_8BPP | LCD_PCLK_EDGE_FALL |\ LCD_AC_BIAS_FREQ(0xff), .cmap_inverse = 0, .cmap_static = 0, }; static struct pxafb_mode_info generic_tft_640x480_mode = { .pixclock = 38461, .bpp = 8, .xres = 640, .yres = 480, .hsync_len = 60, .vsync_len = 2, .left_margin = 70, .upper_margin = 10, .right_margin = 70, .lower_margin = 5, .sync = 0, .cmap_greyscale = 0, }; static struct pxafb_mach_info generic_tft_640x480 = { .modes = &generic_tft_640x480_mode, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_8BPP | LCD_PCLK_EDGE_FALL |\ LCD_AC_BIAS_FREQ(0xff), .cmap_inverse = 0, .cmap_static = 0, }; static struct pxafb_mode_info generic_crt_640x480_mode = { .pixclock = 38461, .bpp = 8, .xres = 640, .yres = 480, .hsync_len = 63, .vsync_len = 2, .left_margin = 81, .upper_margin = 33, .right_margin = 16, .lower_margin = 10, .sync = (FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT), .cmap_greyscale = 0, }; static 
struct pxafb_mach_info generic_crt_640x480 = { .modes = &generic_crt_640x480_mode, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_8BPP | LCD_AC_BIAS_FREQ(0xff), .cmap_inverse = 0, .cmap_static = 0, }; static struct pxafb_mode_info generic_crt_800x600_mode = { .pixclock = 28846, .bpp = 8, .xres = 800, .yres = 600, .hsync_len = 63, .vsync_len = 2, .left_margin = 26, .upper_margin = 21, .right_margin = 26, .lower_margin = 11, .sync = (FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT), .cmap_greyscale = 0, }; static struct pxafb_mach_info generic_crt_800x600 = { .modes = &generic_crt_800x600_mode, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_8BPP | LCD_AC_BIAS_FREQ(0xff), .cmap_inverse = 0, .cmap_static = 0, }; static struct pxafb_mode_info generic_tft_320x240_mode = { .pixclock = 134615, .bpp = 16, .xres = 320, .yres = 240, .hsync_len = 63, .vsync_len = 7, .left_margin = 75, .upper_margin = 0, .right_margin = 15, .lower_margin = 15, .sync = 0, .cmap_greyscale = 0, }; static struct pxafb_mach_info generic_tft_320x240 = { .modes = &generic_tft_320x240_mode, .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_AC_BIAS_FREQ(0xff), .cmap_inverse = 0, .cmap_static = 0, }; static struct pxafb_mode_info generic_stn_640x480_mode = { .pixclock = 57692, .bpp = 8, .xres = 640, .yres = 480, .hsync_len = 4, .vsync_len = 2, .left_margin = 10, .upper_margin = 5, .right_margin = 10, .lower_margin = 5, .sync = (FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT), .cmap_greyscale = 0, }; static struct pxafb_mach_info generic_stn_640x480 = { .modes = &generic_stn_640x480_mode, .num_modes = 1, .lcd_conn = LCD_COLOR_STN_8BPP | LCD_AC_BIAS_FREQ(0xff), .cmap_inverse = 0, .cmap_static = 0, }; static struct pxafb_mach_info *cmx2xx_display = &generic_crt_640x480; static int __init cmx2xx_set_display(char *str) { int disp_type = simple_strtol(str, NULL, 0); switch (disp_type) { case MTYPE_STN320x240: cmx2xx_display = &generic_stn_320x240; break; case MTYPE_TFT640x480: cmx2xx_display = &generic_tft_640x480; 
break; case MTYPE_CRT640x480: cmx2xx_display = &generic_crt_640x480; break; case MTYPE_CRT800x600: cmx2xx_display = &generic_crt_800x600; break; case MTYPE_TFT320x240: cmx2xx_display = &generic_tft_320x240; break; case MTYPE_STN640x480: cmx2xx_display = &generic_stn_640x480; break; default: /* fallback to CRT 640x480 */ cmx2xx_display = &generic_crt_640x480; break; } return 1; } /* This should be done really early to get proper configuration for frame buffer. Indeed, pxafb parameters can be used istead, but CM-X2XX bootloader has limitied line length for kernel command line, and also it will break compatibitlty with proprietary releases already in field. */ __setup("monitor=", cmx2xx_set_display); static void __init cmx2xx_init_display(void) { pxa_set_fb_info(NULL, cmx2xx_display); } #else static inline void cmx2xx_init_display(void) {} #endif #ifdef CONFIG_PM static unsigned long sleep_save_msc[10]; static int cmx2xx_suspend(void) { cmx2xx_pci_suspend(); /* save MSC registers */ sleep_save_msc[0] = __raw_readl(MSC0); sleep_save_msc[1] = __raw_readl(MSC1); sleep_save_msc[2] = __raw_readl(MSC2); /* setup power saving mode registers */ PCFR = 0x0; PSLR = 0xff400000; PMCR = 0x00000005; PWER = 0x80000000; PFER = 0x00000000; PRER = 0x00000000; PGSR0 = 0xC0018800; PGSR1 = 0x004F0002; PGSR2 = 0x6021C000; PGSR3 = 0x00020000; return 0; } static void cmx2xx_resume(void) { cmx2xx_pci_resume(); /* restore MSC registers */ __raw_writel(sleep_save_msc[0], MSC0); __raw_writel(sleep_save_msc[1], MSC1); __raw_writel(sleep_save_msc[2], MSC2); } static struct syscore_ops cmx2xx_pm_syscore_ops = { .resume = cmx2xx_resume, .suspend = cmx2xx_suspend, }; static int __init cmx2xx_pm_init(void) { register_syscore_ops(&cmx2xx_pm_syscore_ops); return 0; } #else static int __init cmx2xx_pm_init(void) { return 0; } #endif #if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE) static void __init cmx2xx_init_ac97(void) { pxa_set_ac97_info(NULL); } #else static inline void 
cmx2xx_init_ac97(void) {} #endif static void __init cmx2xx_init(void) { pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); cmx2xx_pm_init(); if (cpu_is_pxa25x()) cmx255_init(); else cmx270_init(); cmx2xx_init_dm9000(); cmx2xx_init_display(); cmx2xx_init_ac97(); cmx2xx_init_touchscreen(); cmx2xx_init_leds(); } static void __init cmx2xx_init_irq(void) { if (cpu_is_pxa25x()) { pxa25x_init_irq(); cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ); } else { pxa27x_init_irq(); cmx2xx_pci_init_irq(CMX270_GPIO_IT8152_IRQ); } } #ifdef CONFIG_PCI /* Map PCI companion statically */ static struct map_desc cmx2xx_io_desc[] __initdata = { [0] = { /* PCI bridge */ .virtual = (unsigned long)CMX2XX_IT8152_VIRT, .pfn = __phys_to_pfn(PXA_CS4_PHYS), .length = SZ_64M, .type = MT_DEVICE }, }; static void __init cmx2xx_map_io(void) { if (cpu_is_pxa25x()) pxa25x_map_io(); if (cpu_is_pxa27x()) pxa27x_map_io(); iotable_init(cmx2xx_io_desc, ARRAY_SIZE(cmx2xx_io_desc)); it8152_base_address = CMX2XX_IT8152_VIRT; } #else static void __init cmx2xx_map_io(void) { if (cpu_is_pxa25x()) pxa25x_map_io(); if (cpu_is_pxa27x()) pxa27x_map_io(); } #endif MACHINE_START(ARMCORE, "Compulab CM-X2XX") .atag_offset = 0x100, .map_io = cmx2xx_map_io, .nr_irqs = CMX2XX_NR_IRQS, .init_irq = cmx2xx_init_irq, /* NOTE: pxa25x_handle_irq() works on PXA27x w/o camera support */ .handle_irq = pxa25x_handle_irq, .timer = &pxa_timer, .init_machine = cmx2xx_init, #ifdef CONFIG_PCI .dma_zone_size = SZ_64M, #endif .restart = pxa_restart, MACHINE_END
gpl-2.0
Fusion-Devices/android_kernel_motorola_msm8916
drivers/input/touchscreen/mainstone-wm97xx.c
7517
7680
/*
 * mainstone-wm97xx.c -- Mainstone Continuous Touch screen driver for
 *                       Wolfson WM97xx AC97 Codecs.
 *
 * Copyright 2004, 2007 Wolfson Microelectronics PLC.
 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
 * Parts Copyright : Ian Molton <spyro@f2s.com>
 *                   Andrew Zabolotny <zap@homelink.ru>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Notes:
 *     This is a wm97xx extended touch driver to capture touch
 *     data in a continuous manner on the Intel XScale architecture
 *
 *  Features:
 *       - codecs supported:- WM9705, WM9712, WM9713
 *       - processors supported:- Intel XScale PXA25x, PXA26x, PXA27x
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/wm97xx.h>
#include <linux/io.h>
#include <linux/gpio.h>

#include <mach/regs-ac97.h>

#include <asm/mach-types.h>

/* One supported continuous-sampling configuration for a codec. */
struct continuous {
	u16 id;    /* codec id */
	u8 code;   /* continuous code */
	u8 reads;  /* number of coord reads per read cycle */
	u32 speed; /* number of coords per second */
};

/* Number of coordinate reads to batch per polling tick at rate sp. */
#define WM_READS(sp) ((sp / HZ) + 1)

/* Per-codec table of selectable continuous-mode sample rates. */
static const struct continuous cinfo[] = {
	{WM9705_ID2, 0, WM_READS(94), 94},
	{WM9705_ID2, 1, WM_READS(188), 188},
	{WM9705_ID2, 2, WM_READS(375), 375},
	{WM9705_ID2, 3, WM_READS(750), 750},
	{WM9712_ID2, 0, WM_READS(94), 94},
	{WM9712_ID2, 1, WM_READS(188), 188},
	{WM9712_ID2, 2, WM_READS(375), 375},
	{WM9712_ID2, 3, WM_READS(750), 750},
	{WM9713_ID2, 0, WM_READS(94), 94},
	{WM9713_ID2, 1, WM_READS(120), 120},
	{WM9713_ID2, 2, WM_READS(154), 154},
	{WM9713_ID2, 3, WM_READS(188), 188},
};

/* continuous speed index */
static int sp_idx;
/* last: previous raw X sample (duplicate detection);
 * tries: consecutive duplicate reads before declaring pen-up. */
static u16 last, tries;
/* GPIO number used for the pen-down interrupt (0 = none). */
static int irq;

/*
 * Pen sampling frequency (Hz) in continuous mode.
 */
static int cont_rate = 200;
module_param(cont_rate, int, 0);
MODULE_PARM_DESC(cont_rate, "Sampling rate in continuous mode (Hz)");

/*
 * Pen down detection.
 *
 * This driver can either poll or use an interrupt to indicate a pen down
 * event. If the irq request fails then it will fall back to polling mode.
 */
static int pen_int;
module_param(pen_int, int, 0);
MODULE_PARM_DESC(pen_int, "Pen down detection (1 = interrupt, 0 = polling)");

/*
 * Pressure readback.
 *
 * Set to 1 to read back pen down pressure
 */
static int pressure;
module_param(pressure, int, 0);
MODULE_PARM_DESC(pressure, "Pressure readback (1 = pressure, 0 = no pressure)");

/*
 * AC97 touch data slot.
 *
 * Touch screen readback data ac97 slot
 */
static int ac97_touch_slot = 5;
module_param(ac97_touch_slot, int, 0);
MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number");


/* flush AC97 slot 5 FIFO on pxa machines */
#ifdef CONFIG_PXA27x
/* PXA27x: drain the modem-data FIFO while its not-empty flag is set. */
static void wm97xx_acc_pen_up(struct wm97xx *wm)
{
	schedule_timeout_uninterruptible(1);

	while (MISR & (1 << 2))
		MODR;
}
#else
/* Other PXA: no FIFO status bit used here; drain a fixed 16 entries. */
static void wm97xx_acc_pen_up(struct wm97xx *wm)
{
	unsigned int count;

	schedule_timeout_uninterruptible(1);

	for (count = 0; count < 16; count++)
		MODR;
}
#endif

/*
 * Read one batch of touch samples from the AC97 FIFO while the pen is
 * down.  Returns RC_PENUP after repeated duplicate reads, RC_AGAIN when
 * the sample matches the previous one, otherwise reports coordinates to
 * the input layer and returns RC_PENDOWN | RC_AGAIN.
 */
static int wm97xx_acc_pen_down(struct wm97xx *wm)
{
	u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
	int reads = 0;

	/* When the AC97 queue has been drained we need to allow time
	 * to buffer up samples otherwise we end up spinning polling
	 * for samples.  The controller can't have a suitably low
	 * threshold set to use the notifications it gives.
	 */
	schedule_timeout_uninterruptible(1);

	if (tries > 5) {
		tries = 0;
		return RC_PENUP;
	}

	x = MODR;
	if (x == last) {
		tries++;
		return RC_AGAIN;
	}
	last = x;
	do {
		if (reads)
			x = MODR;
		y = MODR;
		if (pressure)
			p = MODR;

		dev_dbg(wm->dev, "Raw coordinates: x=%x, y=%x, p=%x\n",
			x, y, p);

		/* are samples valid */
		if ((x & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_X ||
		    (y & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_Y ||
		    (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
			goto up;

		/* coordinate is good */
		tries = 0;
		input_report_abs(wm->input_dev, ABS_X, x & 0xfff);
		input_report_abs(wm->input_dev, ABS_Y, y & 0xfff);
		input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
		input_report_key(wm->input_dev, BTN_TOUCH, (p != 0));
		input_sync(wm->input_dev);
		reads++;
	} while (reads < cinfo[sp_idx].reads);
up:
	return RC_PENDOWN | RC_AGAIN;
}

/*
 * Configure accelerated (continuous) mode: pick the best sample rate for
 * this codec from cinfo[], set up the pen-down IRQ GPIO (Palm or
 * Mainstone boards), and do codec-specific pen-IRQ GPIO routing.
 * Returns 0 on success or a negative errno.
 */
static int wm97xx_acc_startup(struct wm97xx *wm)
{
	int idx = 0, ret = 0;

	/* check we have a codec */
	if (wm->ac97 == NULL)
		return -ENODEV;

	/* Go you big red fire engine */
	for (idx = 0; idx < ARRAY_SIZE(cinfo); idx++) {
		if (wm->id != cinfo[idx].id)
			continue;
		sp_idx = idx;
		if (cont_rate <= cinfo[idx].speed)
			break;
	}
	wm->acc_rate = cinfo[sp_idx].code;
	wm->acc_slot = ac97_touch_slot;
	dev_info(wm->dev,
		 "mainstone accelerated touchscreen driver, %d samples/sec\n",
		 cinfo[sp_idx].speed);

	/* IRQ driven touchscreen is used on Palm hardware */
	if (machine_is_palmt5() || machine_is_palmtx() || machine_is_palmld()) {
		pen_int = 1;
		/* GPIO 27 is the pen-down line on these Palm boards —
		 * NOTE(review): hard-coded, verify per board. */
		irq = 27;
		/* There is some obscure mutant of WM9712 interbred with WM9713
		 * used on Palm HW */
		wm->variant = WM97xx_WM1613;
	} else if (machine_is_mainstone() && pen_int)
		irq = 4;

	if (irq) {
		ret = gpio_request(irq, "Touchscreen IRQ");
		if (ret)
			goto out;

		ret = gpio_direction_input(irq);
		if (ret) {
			gpio_free(irq);
			goto out;
		}

		wm->pen_irq = gpio_to_irq(irq);
		irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
	} else /* pen irq not supported */
		pen_int = 0;

	/* codec specific irq config */
	if (pen_int) {
		switch (wm->id) {
		case WM9705_ID2:
			break;
		case WM9712_ID2:
		case WM9713_ID2:
			/* use PEN_DOWN GPIO 13 to assert IRQ on GPIO line 2 */
			wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
					   WM97XX_GPIO_POL_HIGH,
					   WM97XX_GPIO_STICKY,
					   WM97XX_GPIO_WAKE);
			wm97xx_config_gpio(wm, WM97XX_GPIO_2,
					   WM97XX_GPIO_OUT,
					   WM97XX_GPIO_POL_HIGH,
					   WM97XX_GPIO_NOTSTICKY,
					   WM97XX_GPIO_NOWAKE);
			break;
		default:
			dev_err(wm->dev,
				"pen down irq not supported on this device\n");
			pen_int = 0;
			break;
		}
	}

out:
	return ret;
}

/* Undo wm97xx_acc_startup(): release the pen-IRQ GPIO if one was taken. */
static void wm97xx_acc_shutdown(struct wm97xx *wm)
{
	/* codec specific deconfig */
	if (pen_int) {
		if (irq)
			gpio_free(irq);
		wm->pen_irq = 0;
	}
}

/* Enable/disable the pen-down interrupt line (nosync variant on disable,
 * so this is safe to call from the IRQ path itself). */
static void wm97xx_irq_enable(struct wm97xx *wm, int enable)
{
	if (enable)
		enable_irq(wm->pen_irq);
	else
		disable_irq_nosync(wm->pen_irq);
}

/* Machine-specific hooks handed to the wm97xx core driver. */
static struct wm97xx_mach_ops mainstone_mach_ops = {
	.acc_enabled	= 1,
	.acc_pen_up	= wm97xx_acc_pen_up,
	.acc_pen_down	= wm97xx_acc_pen_down,
	.acc_startup	= wm97xx_acc_startup,
	.acc_shutdown	= wm97xx_acc_shutdown,
	.irq_enable	= wm97xx_irq_enable,
	.irq_gpio	= WM97XX_GPIO_2,
};

/* Bind our mach ops to the wm97xx core device created by the bus code. */
static int mainstone_wm97xx_probe(struct platform_device *pdev)
{
	struct wm97xx *wm = platform_get_drvdata(pdev);

	return wm97xx_register_mach_ops(wm, &mainstone_mach_ops);
}

/* Detach our mach ops from the wm97xx core device. */
static int mainstone_wm97xx_remove(struct platform_device *pdev)
{
	struct wm97xx *wm = platform_get_drvdata(pdev);

	wm97xx_unregister_mach_ops(wm);
	return 0;
}

static struct platform_driver mainstone_wm97xx_driver = {
	.probe	= mainstone_wm97xx_probe,
	.remove	= mainstone_wm97xx_remove,
	.driver	= {
		.name	= "wm97xx-touch",
	},
};
module_platform_driver(mainstone_wm97xx_driver);

/* Module information */
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
MODULE_DESCRIPTION("wm97xx continuous touch driver for mainstone");
MODULE_LICENSE("GPL");
gpl-2.0
flashbuckets/linux
arch/sh/kernel/reboot.c
9053
1781
/*
 * SuperH machine reboot/halt/power-off plumbing.  Default ("native")
 * implementations are collected in machine_ops so boards or kexec can
 * override individual operations; the public machine_*() entry points
 * simply dispatch through that table.
 */
#include <linux/pm.h>
#include <linux/kexec.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/module.h>
#ifdef CONFIG_SUPERH32
#include <asm/watchdog.h>
#endif
#include <asm/addrspace.h>
#include <asm/reboot.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

/* Board/PM code may install a power-off handler here; NULL means none. */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

#ifdef CONFIG_SUPERH32
/* Arm the watchdog for an immediate reset.  0xFF maxes the counter;
 * 0xC2 is presumably the CSR value enabling reset mode — TODO confirm
 * against the SH watchdog register documentation. */
static void watchdog_trigger_immediate(void)
{
	sh_wdt_write_cnt(0xFF);
	sh_wdt_write_csr(0xC2);
}
#endif

/* Reset the machine, trying progressively cruder mechanisms; never
 * returns.  The command string argument is ignored. */
static void native_machine_restart(char * __unused)
{
	local_irq_disable();

	/* Destroy all of the TLBs in preparation for reset by MMU */
	__flush_tlb_global();

	/* Address error with SR.BL=1 first. */
	trigger_address_error();

#ifdef CONFIG_SUPERH32
	/* If that fails or is unsupported, go for the watchdog next. */
	watchdog_trigger_immediate();
#endif

	/*
	 * Give up and sleep.
	 */
	while (1)
		cpu_sleep();
}

/* Default shutdown: just park the other CPUs. */
static void native_machine_shutdown(void)
{
	smp_send_stop();
}

/* Invoke the registered power-off handler, if any. */
static void native_machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
}

static void native_machine_halt(void)
{
	/* stop other cpus */
	machine_shutdown();

	/* stop this cpu */
	stop_this_cpu(NULL);
}

/* Dispatch table; entries may be replaced by platform/kexec code. */
struct machine_ops machine_ops = {
	.power_off = native_machine_power_off,
	.shutdown = native_machine_shutdown,
	.restart = native_machine_restart,
	.halt = native_machine_halt,
#ifdef CONFIG_KEXEC
	.crash_shutdown = native_machine_crash_shutdown,
#endif
};

/* Public entry points: forward to the current machine_ops handlers. */
void machine_power_off(void)
{
	machine_ops.power_off();
}

void machine_shutdown(void)
{
	machine_ops.shutdown();
}

void machine_restart(char *cmd)
{
	machine_ops.restart(cmd);
}

void machine_halt(void)
{
	machine_ops.halt();
}

#ifdef CONFIG_KEXEC
void machine_crash_shutdown(struct pt_regs *regs)
{
	machine_ops.crash_shutdown(regs);
}
#endif
gpl-2.0
languitar/android_kernel_lge_hammerhead
drivers/net/wireless/zd1211rw/zd_rf_al2230.c
10589
12846
/* ZD1211 USB-WLAN driver for Linux * * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de> * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include "zd_rf.h" #include "zd_usb.h" #include "zd_chip.h" #define IS_AL2230S(chip) ((chip)->al2230s_bit || (chip)->rf.type == AL2230S_RF) static const u32 zd1211_al2230_table[][3] = { RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, }, RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, }, RF_CHANNEL( 3) = { 0x03e790, 0x033331, 0x00000d, }, RF_CHANNEL( 4) = { 0x03e790, 0x0b3331, 0x00000d, }, RF_CHANNEL( 5) = { 0x03f7a0, 0x033331, 0x00000d, }, RF_CHANNEL( 6) = { 0x03f7a0, 0x0b3331, 0x00000d, }, RF_CHANNEL( 7) = { 0x03e7a0, 0x033331, 0x00000d, }, RF_CHANNEL( 8) = { 0x03e7a0, 0x0b3331, 0x00000d, }, RF_CHANNEL( 9) = { 0x03f7b0, 0x033331, 0x00000d, }, RF_CHANNEL(10) = { 0x03f7b0, 0x0b3331, 0x00000d, }, RF_CHANNEL(11) = { 0x03e7b0, 0x033331, 0x00000d, }, RF_CHANNEL(12) = { 0x03e7b0, 0x0b3331, 0x00000d, }, RF_CHANNEL(13) = { 0x03f7c0, 0x033331, 0x00000d, }, RF_CHANNEL(14) = { 0x03e7c0, 0x066661, 0x00000d, }, }; static const u32 zd1211b_al2230_table[][3] = { RF_CHANNEL( 1) = { 0x09efc0, 0x8cccc0, 0xb00000, }, RF_CHANNEL( 2) = { 0x09efc0, 0x8cccd0, 0xb00000, }, RF_CHANNEL( 3) = { 0x09e7c0, 0x8cccc0, 
0xb00000, }, RF_CHANNEL( 4) = { 0x09e7c0, 0x8cccd0, 0xb00000, }, RF_CHANNEL( 5) = { 0x05efc0, 0x8cccc0, 0xb00000, }, RF_CHANNEL( 6) = { 0x05efc0, 0x8cccd0, 0xb00000, }, RF_CHANNEL( 7) = { 0x05e7c0, 0x8cccc0, 0xb00000, }, RF_CHANNEL( 8) = { 0x05e7c0, 0x8cccd0, 0xb00000, }, RF_CHANNEL( 9) = { 0x0defc0, 0x8cccc0, 0xb00000, }, RF_CHANNEL(10) = { 0x0defc0, 0x8cccd0, 0xb00000, }, RF_CHANNEL(11) = { 0x0de7c0, 0x8cccc0, 0xb00000, }, RF_CHANNEL(12) = { 0x0de7c0, 0x8cccd0, 0xb00000, }, RF_CHANNEL(13) = { 0x03efc0, 0x8cccc0, 0xb00000, }, RF_CHANNEL(14) = { 0x03e7c0, 0x866660, 0xb00000, }, }; static const struct zd_ioreq16 zd1211b_ioreqs_shared_1[] = { { ZD_CR240, 0x57 }, { ZD_CR9, 0xe0 }, }; static const struct zd_ioreq16 ioreqs_init_al2230s[] = { { ZD_CR47, 0x1e }, /* MARK_002 */ { ZD_CR106, 0x22 }, { ZD_CR107, 0x2a }, /* MARK_002 */ { ZD_CR109, 0x13 }, /* MARK_002 */ { ZD_CR118, 0xf8 }, /* MARK_002 */ { ZD_CR119, 0x12 }, { ZD_CR122, 0xe0 }, { ZD_CR128, 0x10 }, /* MARK_001 from 0xe->0x10 */ { ZD_CR129, 0x0e }, /* MARK_001 from 0xd->0x0e */ { ZD_CR130, 0x10 }, /* MARK_001 from 0xb->0x0d */ }; static int zd1211b_al2230_finalize_rf(struct zd_chip *chip) { int r; static const struct zd_ioreq16 ioreqs[] = { { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR79, 0x58 }, { ZD_CR12, 0xf0 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x58 }, { ZD_CR203, 0x06 }, { }, { ZD_CR240, 0x80 }, }; r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); if (r) return r; /* related to antenna selection? 
*/ if (chip->new_phy_layout) { r = zd_iowrite16_locked(chip, 0xe1, ZD_CR9); if (r) return r; } return zd_iowrite16_locked(chip, 0x06, ZD_CR203); } static int zd1211_al2230_init_hw(struct zd_rf *rf) { int r; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs_init[] = { { ZD_CR15, 0x20 }, { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x11 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 }, { ZD_CR44, 0x33 }, { ZD_CR106, 0x2a }, { ZD_CR107, 0x1a }, { ZD_CR109, 0x09 }, { ZD_CR110, 0x27 }, { ZD_CR111, 0x2b }, { ZD_CR112, 0x2b }, { ZD_CR119, 0x0a }, { ZD_CR10, 0x89 }, /* for newest (3rd cut) AL2300 */ { ZD_CR17, 0x28 }, { ZD_CR26, 0x93 }, { ZD_CR34, 0x30 }, /* for newest (3rd cut) AL2300 */ { ZD_CR35, 0x3e }, { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 }, /* for newest (3rd cut) AL2300 */ { ZD_CR46, 0x96 }, { ZD_CR47, 0x1e }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 }, { ZD_CR92, 0x0a }, { ZD_CR99, 0x28 }, { ZD_CR100, 0x00 }, { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 }, { ZD_CR106, 0x24 }, { ZD_CR107, 0x2a }, { ZD_CR109, 0x09 }, { ZD_CR110, 0x13 }, { ZD_CR111, 0x1f }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 }, { ZD_CR114, 0x27 }, /* for newest (3rd cut) AL2300 */ { ZD_CR115, 0x24 }, { ZD_CR116, 0x24 }, { ZD_CR117, 0xf4 }, { ZD_CR118, 0xfc }, { ZD_CR119, 0x10 }, { ZD_CR120, 0x4f }, { ZD_CR121, 0x77 }, { ZD_CR122, 0xe0 }, { ZD_CR137, 0x88 }, { ZD_CR252, 0xff }, { ZD_CR253, 0xff }, }; static const struct zd_ioreq16 ioreqs_pll[] = { /* shdnb(PLL_ON)=0 */ { ZD_CR251, 0x2f }, /* shdnb(PLL_ON)=1 */ { ZD_CR251, 0x3f }, { ZD_CR138, 0x28 }, { ZD_CR203, 0x06 }, }; static const u32 rv1[] = { /* Channel 1 */ 0x03f790, 0x033331, 0x00000d, 0x0b3331, 0x03b812, 0x00fff3, }; static const u32 rv2[] = { 0x000da4, 0x0f4dc5, /* fix freq shift, 0x04edc5 */ 0x0805b6, 0x011687, 0x000688, 0x0403b9, /* external control TX power (ZD_CR31) */ 0x00dbba, 0x00099b, 0x0bdffc, 0x00000d, 0x00500f, }; static const u32 rv3[] = { 0x00d00f, 0x004c0f, 
0x00540f, 0x00700f, 0x00500f, }; r = zd_iowrite16a_locked(chip, ioreqs_init, ARRAY_SIZE(ioreqs_init)); if (r) return r; if (IS_AL2230S(chip)) { r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, ARRAY_SIZE(ioreqs_init_al2230s)); if (r) return r; } r = zd_rfwritev_locked(chip, rv1, ARRAY_SIZE(rv1), RF_RV_BITS); if (r) return r; /* improve band edge for AL2230S */ if (IS_AL2230S(chip)) r = zd_rfwrite_locked(chip, 0x000824, RF_RV_BITS); else r = zd_rfwrite_locked(chip, 0x0005a4, RF_RV_BITS); if (r) return r; r = zd_rfwritev_locked(chip, rv2, ARRAY_SIZE(rv2), RF_RV_BITS); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs_pll, ARRAY_SIZE(ioreqs_pll)); if (r) return r; r = zd_rfwritev_locked(chip, rv3, ARRAY_SIZE(rv3), RF_RV_BITS); if (r) return r; return 0; } static int zd1211b_al2230_init_hw(struct zd_rf *rf) { int r; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs1[] = { { ZD_CR10, 0x89 }, { ZD_CR15, 0x20 }, { ZD_CR17, 0x2B }, /* for newest(3rd cut) AL2230 */ { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x93 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 }, { ZD_CR33, 0x28 }, /* 5621 */ { ZD_CR34, 0x30 }, { ZD_CR35, 0x3e }, /* for newest(3rd cut) AL2230 */ { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 }, { ZD_CR46, 0x99 }, /* for newest(3rd cut) AL2230 */ { ZD_CR47, 0x1e }, /* ZD1211B 05.06.10 */ { ZD_CR48, 0x06 }, { ZD_CR49, 0xf9 }, { ZD_CR51, 0x01 }, { ZD_CR52, 0x80 }, { ZD_CR53, 0x7e }, { ZD_CR65, 0x00 }, { ZD_CR66, 0x00 }, { ZD_CR67, 0x00 }, { ZD_CR68, 0x00 }, { ZD_CR69, 0x28 }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 }, { ZD_CR91, 0x00 }, /* 5621 */ { ZD_CR92, 0x0a }, { ZD_CR98, 0x8d }, /* 4804, for 1212 new algorithm */ { ZD_CR99, 0x00 }, /* 5621 */ { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 }, { ZD_CR106, 0x24 }, /* for newest(3rd cut) AL2230 */ { ZD_CR107, 0x2a }, { ZD_CR109, 0x13 }, /* 4804, for 1212 new algorithm */ { ZD_CR110, 0x1f }, /* 4804, for 1212 new algorithm */ { ZD_CR111, 0x1f }, { 
ZD_CR112, 0x1f }, { ZD_CR113, 0x27 }, { ZD_CR114, 0x27 }, { ZD_CR115, 0x26 }, /* 24->26 at 4902 for newest(3rd cut) * AL2230 */ { ZD_CR116, 0x24 }, { ZD_CR117, 0xfa }, /* for 1211b */ { ZD_CR118, 0xfa }, /* for 1211b */ { ZD_CR119, 0x10 }, { ZD_CR120, 0x4f }, { ZD_CR121, 0x6c }, /* for 1211b */ { ZD_CR122, 0xfc }, /* E0->FC at 4902 */ { ZD_CR123, 0x57 }, /* 5623 */ { ZD_CR125, 0xad }, /* 4804, for 1212 new algorithm */ { ZD_CR126, 0x6c }, /* 5614 */ { ZD_CR127, 0x03 }, /* 4804, for 1212 new algorithm */ { ZD_CR137, 0x50 }, /* 5614 */ { ZD_CR138, 0xa8 }, { ZD_CR144, 0xac }, /* 5621 */ { ZD_CR150, 0x0d }, { ZD_CR252, 0x34 }, { ZD_CR253, 0x34 }, }; static const u32 rv1[] = { 0x8cccd0, 0x481dc0, 0xcfff00, 0x25a000, }; static const u32 rv2[] = { /* To improve AL2230 yield, improve phase noise, 4713 */ 0x25a000, 0xa3b2f0, 0x6da010, /* Reg6 update for MP versio */ 0xe36280, /* Modified by jxiao for Bor-Chin on 2004/08/02 */ 0x116000, 0x9dc020, /* External control TX power (ZD_CR31) */ 0x5ddb00, /* RegA update for MP version */ 0xd99000, /* RegB update for MP version */ 0x3ffbd0, /* RegC update for MP version */ 0xb00000, /* RegD update for MP version */ /* improve phase noise and remove phase calibration,4713 */ 0xf01a00, }; static const struct zd_ioreq16 ioreqs2[] = { { ZD_CR251, 0x2f }, /* shdnb(PLL_ON)=0 */ { ZD_CR251, 0x7f }, /* shdnb(PLL_ON)=1 */ }; static const u32 rv3[] = { /* To improve AL2230 yield, 4713 */ 0xf01b00, 0xf01e00, 0xf01a00, }; static const struct zd_ioreq16 ioreqs3[] = { /* related to 6M band edge patching, happens unconditionally */ { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 }, }; r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1, ARRAY_SIZE(zd1211b_ioreqs_shared_1)); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs1, ARRAY_SIZE(ioreqs1)); if (r) return r; if (IS_AL2230S(chip)) { r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, ARRAY_SIZE(ioreqs_init_al2230s)); if (r) return r; } r = zd_rfwritev_cr_locked(chip, 
zd1211b_al2230_table[0], 3); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv1, ARRAY_SIZE(rv1)); if (r) return r; if (IS_AL2230S(chip)) r = zd_rfwrite_locked(chip, 0x241000, RF_RV_BITS); else r = zd_rfwrite_locked(chip, 0x25a000, RF_RV_BITS); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv2, ARRAY_SIZE(rv2)); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv3, ARRAY_SIZE(rv3)); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3)); if (r) return r; return zd1211b_al2230_finalize_rf(chip); } static int zd1211_al2230_set_channel(struct zd_rf *rf, u8 channel) { int r; const u32 *rv = zd1211_al2230_table[channel-1]; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR138, 0x28 }, { ZD_CR203, 0x06 }, }; r = zd_rfwritev_locked(chip, rv, 3, RF_RV_BITS); if (r) return r; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int zd1211b_al2230_set_channel(struct zd_rf *rf, u8 channel) { int r; const u32 *rv = zd1211b_al2230_table[channel-1]; struct zd_chip *chip = zd_rf_to_chip(rf); r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1, ARRAY_SIZE(zd1211b_ioreqs_shared_1)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv, 3); if (r) return r; return zd1211b_al2230_finalize_rf(chip); } static int zd1211_al2230_switch_radio_on(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x00 }, { ZD_CR251, 0x3f }, }; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int zd1211b_al2230_switch_radio_on(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x00 }, { ZD_CR251, 0x7f }, }; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int al2230_switch_radio_off(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); static const struct 
zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x04 }, { ZD_CR251, 0x2f }, }; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } int zd_rf_init_al2230(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); rf->switch_radio_off = al2230_switch_radio_off; if (zd_chip_is_zd1211b(chip)) { rf->init_hw = zd1211b_al2230_init_hw; rf->set_channel = zd1211b_al2230_set_channel; rf->switch_radio_on = zd1211b_al2230_switch_radio_on; } else { rf->init_hw = zd1211_al2230_init_hw; rf->set_channel = zd1211_al2230_set_channel; rf->switch_radio_on = zd1211_al2230_switch_radio_on; } rf->patch_6m_band_edge = zd_rf_generic_patch_6m; rf->patch_cck_gain = 1; return 0; }
gpl-2.0
Snuzzo/funky_jewel
drivers/net/wireless/zd1211rw/zd_rf_al2230.c
10589
12846
/* ZD1211 USB-WLAN driver for Linux * * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de> * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include "zd_rf.h" #include "zd_usb.h" #include "zd_chip.h" #define IS_AL2230S(chip) ((chip)->al2230s_bit || (chip)->rf.type == AL2230S_RF) static const u32 zd1211_al2230_table[][3] = { RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, }, RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, }, RF_CHANNEL( 3) = { 0x03e790, 0x033331, 0x00000d, }, RF_CHANNEL( 4) = { 0x03e790, 0x0b3331, 0x00000d, }, RF_CHANNEL( 5) = { 0x03f7a0, 0x033331, 0x00000d, }, RF_CHANNEL( 6) = { 0x03f7a0, 0x0b3331, 0x00000d, }, RF_CHANNEL( 7) = { 0x03e7a0, 0x033331, 0x00000d, }, RF_CHANNEL( 8) = { 0x03e7a0, 0x0b3331, 0x00000d, }, RF_CHANNEL( 9) = { 0x03f7b0, 0x033331, 0x00000d, }, RF_CHANNEL(10) = { 0x03f7b0, 0x0b3331, 0x00000d, }, RF_CHANNEL(11) = { 0x03e7b0, 0x033331, 0x00000d, }, RF_CHANNEL(12) = { 0x03e7b0, 0x0b3331, 0x00000d, }, RF_CHANNEL(13) = { 0x03f7c0, 0x033331, 0x00000d, }, RF_CHANNEL(14) = { 0x03e7c0, 0x066661, 0x00000d, }, }; static const u32 zd1211b_al2230_table[][3] = { RF_CHANNEL( 1) = { 0x09efc0, 0x8cccc0, 0xb00000, }, RF_CHANNEL( 2) = { 0x09efc0, 0x8cccd0, 0xb00000, }, RF_CHANNEL( 3) = { 0x09e7c0, 0x8cccc0, 
0xb00000, }, RF_CHANNEL( 4) = { 0x09e7c0, 0x8cccd0, 0xb00000, }, RF_CHANNEL( 5) = { 0x05efc0, 0x8cccc0, 0xb00000, }, RF_CHANNEL( 6) = { 0x05efc0, 0x8cccd0, 0xb00000, }, RF_CHANNEL( 7) = { 0x05e7c0, 0x8cccc0, 0xb00000, }, RF_CHANNEL( 8) = { 0x05e7c0, 0x8cccd0, 0xb00000, }, RF_CHANNEL( 9) = { 0x0defc0, 0x8cccc0, 0xb00000, }, RF_CHANNEL(10) = { 0x0defc0, 0x8cccd0, 0xb00000, }, RF_CHANNEL(11) = { 0x0de7c0, 0x8cccc0, 0xb00000, }, RF_CHANNEL(12) = { 0x0de7c0, 0x8cccd0, 0xb00000, }, RF_CHANNEL(13) = { 0x03efc0, 0x8cccc0, 0xb00000, }, RF_CHANNEL(14) = { 0x03e7c0, 0x866660, 0xb00000, }, }; static const struct zd_ioreq16 zd1211b_ioreqs_shared_1[] = { { ZD_CR240, 0x57 }, { ZD_CR9, 0xe0 }, }; static const struct zd_ioreq16 ioreqs_init_al2230s[] = { { ZD_CR47, 0x1e }, /* MARK_002 */ { ZD_CR106, 0x22 }, { ZD_CR107, 0x2a }, /* MARK_002 */ { ZD_CR109, 0x13 }, /* MARK_002 */ { ZD_CR118, 0xf8 }, /* MARK_002 */ { ZD_CR119, 0x12 }, { ZD_CR122, 0xe0 }, { ZD_CR128, 0x10 }, /* MARK_001 from 0xe->0x10 */ { ZD_CR129, 0x0e }, /* MARK_001 from 0xd->0x0e */ { ZD_CR130, 0x10 }, /* MARK_001 from 0xb->0x0d */ }; static int zd1211b_al2230_finalize_rf(struct zd_chip *chip) { int r; static const struct zd_ioreq16 ioreqs[] = { { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR79, 0x58 }, { ZD_CR12, 0xf0 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x58 }, { ZD_CR203, 0x06 }, { }, { ZD_CR240, 0x80 }, }; r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); if (r) return r; /* related to antenna selection? 
*/ if (chip->new_phy_layout) { r = zd_iowrite16_locked(chip, 0xe1, ZD_CR9); if (r) return r; } return zd_iowrite16_locked(chip, 0x06, ZD_CR203); } static int zd1211_al2230_init_hw(struct zd_rf *rf) { int r; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs_init[] = { { ZD_CR15, 0x20 }, { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x11 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 }, { ZD_CR44, 0x33 }, { ZD_CR106, 0x2a }, { ZD_CR107, 0x1a }, { ZD_CR109, 0x09 }, { ZD_CR110, 0x27 }, { ZD_CR111, 0x2b }, { ZD_CR112, 0x2b }, { ZD_CR119, 0x0a }, { ZD_CR10, 0x89 }, /* for newest (3rd cut) AL2300 */ { ZD_CR17, 0x28 }, { ZD_CR26, 0x93 }, { ZD_CR34, 0x30 }, /* for newest (3rd cut) AL2300 */ { ZD_CR35, 0x3e }, { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 }, /* for newest (3rd cut) AL2300 */ { ZD_CR46, 0x96 }, { ZD_CR47, 0x1e }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 }, { ZD_CR92, 0x0a }, { ZD_CR99, 0x28 }, { ZD_CR100, 0x00 }, { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 }, { ZD_CR106, 0x24 }, { ZD_CR107, 0x2a }, { ZD_CR109, 0x09 }, { ZD_CR110, 0x13 }, { ZD_CR111, 0x1f }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 }, { ZD_CR114, 0x27 }, /* for newest (3rd cut) AL2300 */ { ZD_CR115, 0x24 }, { ZD_CR116, 0x24 }, { ZD_CR117, 0xf4 }, { ZD_CR118, 0xfc }, { ZD_CR119, 0x10 }, { ZD_CR120, 0x4f }, { ZD_CR121, 0x77 }, { ZD_CR122, 0xe0 }, { ZD_CR137, 0x88 }, { ZD_CR252, 0xff }, { ZD_CR253, 0xff }, }; static const struct zd_ioreq16 ioreqs_pll[] = { /* shdnb(PLL_ON)=0 */ { ZD_CR251, 0x2f }, /* shdnb(PLL_ON)=1 */ { ZD_CR251, 0x3f }, { ZD_CR138, 0x28 }, { ZD_CR203, 0x06 }, }; static const u32 rv1[] = { /* Channel 1 */ 0x03f790, 0x033331, 0x00000d, 0x0b3331, 0x03b812, 0x00fff3, }; static const u32 rv2[] = { 0x000da4, 0x0f4dc5, /* fix freq shift, 0x04edc5 */ 0x0805b6, 0x011687, 0x000688, 0x0403b9, /* external control TX power (ZD_CR31) */ 0x00dbba, 0x00099b, 0x0bdffc, 0x00000d, 0x00500f, }; static const u32 rv3[] = { 0x00d00f, 0x004c0f, 
0x00540f, 0x00700f, 0x00500f, }; r = zd_iowrite16a_locked(chip, ioreqs_init, ARRAY_SIZE(ioreqs_init)); if (r) return r; if (IS_AL2230S(chip)) { r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, ARRAY_SIZE(ioreqs_init_al2230s)); if (r) return r; } r = zd_rfwritev_locked(chip, rv1, ARRAY_SIZE(rv1), RF_RV_BITS); if (r) return r; /* improve band edge for AL2230S */ if (IS_AL2230S(chip)) r = zd_rfwrite_locked(chip, 0x000824, RF_RV_BITS); else r = zd_rfwrite_locked(chip, 0x0005a4, RF_RV_BITS); if (r) return r; r = zd_rfwritev_locked(chip, rv2, ARRAY_SIZE(rv2), RF_RV_BITS); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs_pll, ARRAY_SIZE(ioreqs_pll)); if (r) return r; r = zd_rfwritev_locked(chip, rv3, ARRAY_SIZE(rv3), RF_RV_BITS); if (r) return r; return 0; } static int zd1211b_al2230_init_hw(struct zd_rf *rf) { int r; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs1[] = { { ZD_CR10, 0x89 }, { ZD_CR15, 0x20 }, { ZD_CR17, 0x2B }, /* for newest(3rd cut) AL2230 */ { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x93 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 }, { ZD_CR33, 0x28 }, /* 5621 */ { ZD_CR34, 0x30 }, { ZD_CR35, 0x3e }, /* for newest(3rd cut) AL2230 */ { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 }, { ZD_CR46, 0x99 }, /* for newest(3rd cut) AL2230 */ { ZD_CR47, 0x1e }, /* ZD1211B 05.06.10 */ { ZD_CR48, 0x06 }, { ZD_CR49, 0xf9 }, { ZD_CR51, 0x01 }, { ZD_CR52, 0x80 }, { ZD_CR53, 0x7e }, { ZD_CR65, 0x00 }, { ZD_CR66, 0x00 }, { ZD_CR67, 0x00 }, { ZD_CR68, 0x00 }, { ZD_CR69, 0x28 }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 }, { ZD_CR91, 0x00 }, /* 5621 */ { ZD_CR92, 0x0a }, { ZD_CR98, 0x8d }, /* 4804, for 1212 new algorithm */ { ZD_CR99, 0x00 }, /* 5621 */ { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 }, { ZD_CR106, 0x24 }, /* for newest(3rd cut) AL2230 */ { ZD_CR107, 0x2a }, { ZD_CR109, 0x13 }, /* 4804, for 1212 new algorithm */ { ZD_CR110, 0x1f }, /* 4804, for 1212 new algorithm */ { ZD_CR111, 0x1f }, { 
ZD_CR112, 0x1f }, { ZD_CR113, 0x27 }, { ZD_CR114, 0x27 }, { ZD_CR115, 0x26 }, /* 24->26 at 4902 for newest(3rd cut) * AL2230 */ { ZD_CR116, 0x24 }, { ZD_CR117, 0xfa }, /* for 1211b */ { ZD_CR118, 0xfa }, /* for 1211b */ { ZD_CR119, 0x10 }, { ZD_CR120, 0x4f }, { ZD_CR121, 0x6c }, /* for 1211b */ { ZD_CR122, 0xfc }, /* E0->FC at 4902 */ { ZD_CR123, 0x57 }, /* 5623 */ { ZD_CR125, 0xad }, /* 4804, for 1212 new algorithm */ { ZD_CR126, 0x6c }, /* 5614 */ { ZD_CR127, 0x03 }, /* 4804, for 1212 new algorithm */ { ZD_CR137, 0x50 }, /* 5614 */ { ZD_CR138, 0xa8 }, { ZD_CR144, 0xac }, /* 5621 */ { ZD_CR150, 0x0d }, { ZD_CR252, 0x34 }, { ZD_CR253, 0x34 }, }; static const u32 rv1[] = { 0x8cccd0, 0x481dc0, 0xcfff00, 0x25a000, }; static const u32 rv2[] = { /* To improve AL2230 yield, improve phase noise, 4713 */ 0x25a000, 0xa3b2f0, 0x6da010, /* Reg6 update for MP versio */ 0xe36280, /* Modified by jxiao for Bor-Chin on 2004/08/02 */ 0x116000, 0x9dc020, /* External control TX power (ZD_CR31) */ 0x5ddb00, /* RegA update for MP version */ 0xd99000, /* RegB update for MP version */ 0x3ffbd0, /* RegC update for MP version */ 0xb00000, /* RegD update for MP version */ /* improve phase noise and remove phase calibration,4713 */ 0xf01a00, }; static const struct zd_ioreq16 ioreqs2[] = { { ZD_CR251, 0x2f }, /* shdnb(PLL_ON)=0 */ { ZD_CR251, 0x7f }, /* shdnb(PLL_ON)=1 */ }; static const u32 rv3[] = { /* To improve AL2230 yield, 4713 */ 0xf01b00, 0xf01e00, 0xf01a00, }; static const struct zd_ioreq16 ioreqs3[] = { /* related to 6M band edge patching, happens unconditionally */ { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 }, }; r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1, ARRAY_SIZE(zd1211b_ioreqs_shared_1)); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs1, ARRAY_SIZE(ioreqs1)); if (r) return r; if (IS_AL2230S(chip)) { r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, ARRAY_SIZE(ioreqs_init_al2230s)); if (r) return r; } r = zd_rfwritev_cr_locked(chip, 
zd1211b_al2230_table[0], 3); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv1, ARRAY_SIZE(rv1)); if (r) return r; if (IS_AL2230S(chip)) r = zd_rfwrite_locked(chip, 0x241000, RF_RV_BITS); else r = zd_rfwrite_locked(chip, 0x25a000, RF_RV_BITS); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv2, ARRAY_SIZE(rv2)); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv3, ARRAY_SIZE(rv3)); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3)); if (r) return r; return zd1211b_al2230_finalize_rf(chip); } static int zd1211_al2230_set_channel(struct zd_rf *rf, u8 channel) { int r; const u32 *rv = zd1211_al2230_table[channel-1]; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR138, 0x28 }, { ZD_CR203, 0x06 }, }; r = zd_rfwritev_locked(chip, rv, 3, RF_RV_BITS); if (r) return r; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int zd1211b_al2230_set_channel(struct zd_rf *rf, u8 channel) { int r; const u32 *rv = zd1211b_al2230_table[channel-1]; struct zd_chip *chip = zd_rf_to_chip(rf); r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1, ARRAY_SIZE(zd1211b_ioreqs_shared_1)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv, 3); if (r) return r; return zd1211b_al2230_finalize_rf(chip); } static int zd1211_al2230_switch_radio_on(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x00 }, { ZD_CR251, 0x3f }, }; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int zd1211b_al2230_switch_radio_on(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x00 }, { ZD_CR251, 0x7f }, }; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int al2230_switch_radio_off(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); static const struct 
zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x04 }, { ZD_CR251, 0x2f }, }; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } int zd_rf_init_al2230(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); rf->switch_radio_off = al2230_switch_radio_off; if (zd_chip_is_zd1211b(chip)) { rf->init_hw = zd1211b_al2230_init_hw; rf->set_channel = zd1211b_al2230_set_channel; rf->switch_radio_on = zd1211b_al2230_switch_radio_on; } else { rf->init_hw = zd1211_al2230_init_hw; rf->set_channel = zd1211_al2230_set_channel; rf->switch_radio_on = zd1211_al2230_switch_radio_on; } rf->patch_6m_band_edge = zd_rf_generic_patch_6m; rf->patch_cck_gain = 1; return 0; }
gpl-2.0
notro/linux-staging
drivers/staging/iio/adc/mxs-lradc.c
350
47853
/* * Freescale i.MX28 LRADC driver * * Copyright (c) 2012 DENX Software Engineering, GmbH. * Marek Vasut <marex@denx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/err.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/stmp_device.h> #include <linux/bitops.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/clk.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/buffer.h> #include <linux/iio/trigger.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #define DRIVER_NAME "mxs-lradc" #define LRADC_MAX_DELAY_CHANS 4 #define LRADC_MAX_MAPPED_CHANS 8 #define LRADC_MAX_TOTAL_CHANS 16 #define LRADC_DELAY_TIMER_HZ 2000 /* * Make this runtime configurable if necessary. Currently, if the buffered mode * is enabled, the LRADC takes LRADC_DELAY_TIMER_LOOP samples of data before * triggering IRQ. The sampling happens every (LRADC_DELAY_TIMER_PER / 2000) * seconds. The result is that the samples arrive every 500mS. 
*/ #define LRADC_DELAY_TIMER_PER 200 #define LRADC_DELAY_TIMER_LOOP 5 /* * Once the pen touches the touchscreen, the touchscreen switches from * IRQ-driven mode to polling mode to prevent interrupt storm. The polling * is realized by worker thread, which is called every 20 or so milliseconds. * This gives the touchscreen enough fluence and does not strain the system * too much. */ #define LRADC_TS_SAMPLE_DELAY_MS 5 /* * The LRADC reads the following amount of samples from each touchscreen * channel and the driver then computes avarage of these. */ #define LRADC_TS_SAMPLE_AMOUNT 4 enum mxs_lradc_id { IMX23_LRADC, IMX28_LRADC, }; static const char * const mx23_lradc_irq_names[] = { "mxs-lradc-touchscreen", "mxs-lradc-channel0", "mxs-lradc-channel1", "mxs-lradc-channel2", "mxs-lradc-channel3", "mxs-lradc-channel4", "mxs-lradc-channel5", "mxs-lradc-channel6", "mxs-lradc-channel7", }; static const char * const mx28_lradc_irq_names[] = { "mxs-lradc-touchscreen", "mxs-lradc-thresh0", "mxs-lradc-thresh1", "mxs-lradc-channel0", "mxs-lradc-channel1", "mxs-lradc-channel2", "mxs-lradc-channel3", "mxs-lradc-channel4", "mxs-lradc-channel5", "mxs-lradc-channel6", "mxs-lradc-channel7", "mxs-lradc-button0", "mxs-lradc-button1", }; struct mxs_lradc_of_config { const int irq_count; const char * const *irq_name; const uint32_t *vref_mv; }; #define VREF_MV_BASE 1850 static const uint32_t mx23_vref_mv[LRADC_MAX_TOTAL_CHANS] = { VREF_MV_BASE, /* CH0 */ VREF_MV_BASE, /* CH1 */ VREF_MV_BASE, /* CH2 */ VREF_MV_BASE, /* CH3 */ VREF_MV_BASE, /* CH4 */ VREF_MV_BASE, /* CH5 */ VREF_MV_BASE * 2, /* CH6 VDDIO */ VREF_MV_BASE * 4, /* CH7 VBATT */ VREF_MV_BASE, /* CH8 Temp sense 0 */ VREF_MV_BASE, /* CH9 Temp sense 1 */ VREF_MV_BASE, /* CH10 */ VREF_MV_BASE, /* CH11 */ VREF_MV_BASE, /* CH12 USB_DP */ VREF_MV_BASE, /* CH13 USB_DN */ VREF_MV_BASE, /* CH14 VBG */ VREF_MV_BASE * 4, /* CH15 VDD5V */ }; static const uint32_t mx28_vref_mv[LRADC_MAX_TOTAL_CHANS] = { VREF_MV_BASE, /* CH0 */ VREF_MV_BASE, /* 
CH1 */ VREF_MV_BASE, /* CH2 */ VREF_MV_BASE, /* CH3 */ VREF_MV_BASE, /* CH4 */ VREF_MV_BASE, /* CH5 */ VREF_MV_BASE, /* CH6 */ VREF_MV_BASE * 4, /* CH7 VBATT */ VREF_MV_BASE, /* CH8 Temp sense 0 */ VREF_MV_BASE, /* CH9 Temp sense 1 */ VREF_MV_BASE * 2, /* CH10 VDDIO */ VREF_MV_BASE, /* CH11 VTH */ VREF_MV_BASE * 2, /* CH12 VDDA */ VREF_MV_BASE, /* CH13 VDDD */ VREF_MV_BASE, /* CH14 VBG */ VREF_MV_BASE * 4, /* CH15 VDD5V */ }; static const struct mxs_lradc_of_config mxs_lradc_of_config[] = { [IMX23_LRADC] = { .irq_count = ARRAY_SIZE(mx23_lradc_irq_names), .irq_name = mx23_lradc_irq_names, .vref_mv = mx23_vref_mv, }, [IMX28_LRADC] = { .irq_count = ARRAY_SIZE(mx28_lradc_irq_names), .irq_name = mx28_lradc_irq_names, .vref_mv = mx28_vref_mv, }, }; enum mxs_lradc_ts { MXS_LRADC_TOUCHSCREEN_NONE = 0, MXS_LRADC_TOUCHSCREEN_4WIRE, MXS_LRADC_TOUCHSCREEN_5WIRE, }; /* * Touchscreen handling */ enum lradc_ts_plate { LRADC_TOUCH = 0, LRADC_SAMPLE_X, LRADC_SAMPLE_Y, LRADC_SAMPLE_PRESSURE, LRADC_SAMPLE_VALID, }; enum mxs_lradc_divbytwo { MXS_LRADC_DIV_DISABLED = 0, MXS_LRADC_DIV_ENABLED, }; struct mxs_lradc_scale { unsigned int integer; unsigned int nano; }; struct mxs_lradc { struct device *dev; void __iomem *base; int irq[13]; struct clk *clk; uint32_t *buffer; struct iio_trigger *trig; struct mutex lock; struct completion completion; const uint32_t *vref_mv; struct mxs_lradc_scale scale_avail[LRADC_MAX_TOTAL_CHANS][2]; unsigned long is_divided; /* * When the touchscreen is enabled, we give it two private virtual * channels: #6 and #7. This means that only 6 virtual channels (instead * of 8) will be available for buffered capture. */ #define TOUCHSCREEN_VCHANNEL1 7 #define TOUCHSCREEN_VCHANNEL2 6 #define BUFFER_VCHANS_LIMITED 0x3f #define BUFFER_VCHANS_ALL 0xff u8 buffer_vchans; /* * Furthermore, certain LRADC channels are shared between touchscreen * and/or touch-buttons and generic LRADC block. 
Therefore when using * either of these, these channels are not available for the regular * sampling. The shared channels are as follows: * * CH0 -- Touch button #0 * CH1 -- Touch button #1 * CH2 -- Touch screen XPUL * CH3 -- Touch screen YPLL * CH4 -- Touch screen XNUL * CH5 -- Touch screen YNLR * CH6 -- Touch screen WIPER (5-wire only) * * The bitfields below represents which parts of the LRADC block are * switched into special mode of operation. These channels can not * be sampled as regular LRADC channels. The driver will refuse any * attempt to sample these channels. */ #define CHAN_MASK_TOUCHBUTTON (BIT(1) | BIT(0)) #define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 2) #define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 2) enum mxs_lradc_ts use_touchscreen; bool use_touchbutton; struct input_dev *ts_input; enum mxs_lradc_id soc; enum lradc_ts_plate cur_plate; /* statemachine */ bool ts_valid; unsigned ts_x_pos; unsigned ts_y_pos; unsigned ts_pressure; /* handle touchscreen's physical behaviour */ /* samples per coordinate */ unsigned over_sample_cnt; /* time clocks between samples */ unsigned over_sample_delay; /* time in clocks to wait after the plates where switched */ unsigned settling_delay; }; #define LRADC_CTRL0 0x00 # define LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE BIT(23) # define LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE BIT(22) # define LRADC_CTRL0_MX28_YNNSW /* YM */ BIT(21) # define LRADC_CTRL0_MX28_YPNSW /* YP */ BIT(20) # define LRADC_CTRL0_MX28_YPPSW /* YP */ BIT(19) # define LRADC_CTRL0_MX28_XNNSW /* XM */ BIT(18) # define LRADC_CTRL0_MX28_XNPSW /* XM */ BIT(17) # define LRADC_CTRL0_MX28_XPPSW /* XP */ BIT(16) # define LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE BIT(20) # define LRADC_CTRL0_MX23_YM BIT(19) # define LRADC_CTRL0_MX23_XM BIT(18) # define LRADC_CTRL0_MX23_YP BIT(17) # define LRADC_CTRL0_MX23_XP BIT(16) # define LRADC_CTRL0_MX28_PLATE_MASK \ (LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE | \ LRADC_CTRL0_MX28_YNNSW | LRADC_CTRL0_MX28_YPNSW | \ LRADC_CTRL0_MX28_YPPSW | 
LRADC_CTRL0_MX28_XNNSW | \ LRADC_CTRL0_MX28_XNPSW | LRADC_CTRL0_MX28_XPPSW) # define LRADC_CTRL0_MX23_PLATE_MASK \ (LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE | \ LRADC_CTRL0_MX23_YM | LRADC_CTRL0_MX23_XM | \ LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XP) #define LRADC_CTRL1 0x10 #define LRADC_CTRL1_TOUCH_DETECT_IRQ_EN BIT(24) #define LRADC_CTRL1_LRADC_IRQ_EN(n) (1 << ((n) + 16)) #define LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK (0x1fff << 16) #define LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK (0x01ff << 16) #define LRADC_CTRL1_LRADC_IRQ_EN_OFFSET 16 #define LRADC_CTRL1_TOUCH_DETECT_IRQ BIT(8) #define LRADC_CTRL1_LRADC_IRQ(n) (1 << (n)) #define LRADC_CTRL1_MX28_LRADC_IRQ_MASK 0x1fff #define LRADC_CTRL1_MX23_LRADC_IRQ_MASK 0x01ff #define LRADC_CTRL1_LRADC_IRQ_OFFSET 0 #define LRADC_CTRL2 0x20 #define LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET 24 #define LRADC_CTRL2_TEMPSENSE_PWD BIT(15) #define LRADC_STATUS 0x40 #define LRADC_STATUS_TOUCH_DETECT_RAW BIT(0) #define LRADC_CH(n) (0x50 + (0x10 * (n))) #define LRADC_CH_ACCUMULATE BIT(29) #define LRADC_CH_NUM_SAMPLES_MASK (0x1f << 24) #define LRADC_CH_NUM_SAMPLES_OFFSET 24 #define LRADC_CH_NUM_SAMPLES(x) \ ((x) << LRADC_CH_NUM_SAMPLES_OFFSET) #define LRADC_CH_VALUE_MASK 0x3ffff #define LRADC_CH_VALUE_OFFSET 0 #define LRADC_DELAY(n) (0xd0 + (0x10 * (n))) #define LRADC_DELAY_TRIGGER_LRADCS_MASK (0xff << 24) #define LRADC_DELAY_TRIGGER_LRADCS_OFFSET 24 #define LRADC_DELAY_TRIGGER(x) \ (((x) << LRADC_DELAY_TRIGGER_LRADCS_OFFSET) & \ LRADC_DELAY_TRIGGER_LRADCS_MASK) #define LRADC_DELAY_KICK (1 << 20) #define LRADC_DELAY_TRIGGER_DELAYS_MASK (0xf << 16) #define LRADC_DELAY_TRIGGER_DELAYS_OFFSET 16 #define LRADC_DELAY_TRIGGER_DELAYS(x) \ (((x) << LRADC_DELAY_TRIGGER_DELAYS_OFFSET) & \ LRADC_DELAY_TRIGGER_DELAYS_MASK) #define LRADC_DELAY_LOOP_COUNT_MASK (0x1f << 11) #define LRADC_DELAY_LOOP_COUNT_OFFSET 11 #define LRADC_DELAY_LOOP(x) \ (((x) << LRADC_DELAY_LOOP_COUNT_OFFSET) & \ LRADC_DELAY_LOOP_COUNT_MASK) #define LRADC_DELAY_DELAY_MASK 0x7ff #define 
LRADC_DELAY_DELAY_OFFSET 0 #define LRADC_DELAY_DELAY(x) \ (((x) << LRADC_DELAY_DELAY_OFFSET) & \ LRADC_DELAY_DELAY_MASK) #define LRADC_CTRL4 0x140 #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) #define LRADC_CTRL4_LRADCSELECT(n, x) \ (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \ LRADC_CTRL4_LRADCSELECT_MASK(n)) #define LRADC_RESOLUTION 12 #define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) static void mxs_lradc_reg_set(struct mxs_lradc *lradc, u32 val, u32 reg) { writel(val, lradc->base + reg + STMP_OFFSET_REG_SET); } static void mxs_lradc_reg_clear(struct mxs_lradc *lradc, u32 val, u32 reg) { writel(val, lradc->base + reg + STMP_OFFSET_REG_CLR); } static void mxs_lradc_reg_wrt(struct mxs_lradc *lradc, u32 val, u32 reg) { writel(val, lradc->base + reg); } static u32 mxs_lradc_plate_mask(struct mxs_lradc *lradc) { if (lradc->soc == IMX23_LRADC) return LRADC_CTRL0_MX23_PLATE_MASK; return LRADC_CTRL0_MX28_PLATE_MASK; } static u32 mxs_lradc_irq_en_mask(struct mxs_lradc *lradc) { if (lradc->soc == IMX23_LRADC) return LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK; return LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK; } static u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc) { if (lradc->soc == IMX23_LRADC) return LRADC_CTRL1_MX23_LRADC_IRQ_MASK; return LRADC_CTRL1_MX28_LRADC_IRQ_MASK; } static u32 mxs_lradc_touch_detect_bit(struct mxs_lradc *lradc) { if (lradc->soc == IMX23_LRADC) return LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE; return LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE; } static u32 mxs_lradc_drive_x_plate(struct mxs_lradc *lradc) { if (lradc->soc == IMX23_LRADC) return LRADC_CTRL0_MX23_XP | LRADC_CTRL0_MX23_XM; return LRADC_CTRL0_MX28_XPPSW | LRADC_CTRL0_MX28_XNNSW; } static u32 mxs_lradc_drive_y_plate(struct mxs_lradc *lradc) { if (lradc->soc == IMX23_LRADC) return LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_YM; return LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_YNNSW; } static u32 mxs_lradc_drive_pressure(struct mxs_lradc 
*lradc) { if (lradc->soc == IMX23_LRADC) return LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XM; return LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW; } static bool mxs_lradc_check_touch_event(struct mxs_lradc *lradc) { return !!(readl(lradc->base + LRADC_STATUS) & LRADC_STATUS_TOUCH_DETECT_RAW); } static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch, unsigned ch) { mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch), LRADC_CTRL4); mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4); } static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) { /* * prepare for oversampling conversion * * from the datasheet: * "The ACCUMULATE bit in the appropriate channel register * HW_LRADC_CHn must be set to 1 if NUM_SAMPLES is greater then 0; * otherwise, the IRQs will not fire." */ mxs_lradc_reg_wrt(lradc, LRADC_CH_ACCUMULATE | LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1), LRADC_CH(ch)); /* from the datasheet: * "Software must clear this register in preparation for a * multi-cycle accumulation. */ mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch)); /* * prepare the delay/loop unit according to the oversampling count * * from the datasheet: * "The DELAY fields in HW_LRADC_DELAY0, HW_LRADC_DELAY1, * HW_LRADC_DELAY2, and HW_LRADC_DELAY3 must be non-zero; otherwise, * the LRADC will not trigger the delay group." */ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << ch) | LRADC_DELAY_TRIGGER_DELAYS(0) | LRADC_DELAY_LOOP(lradc->over_sample_cnt - 1) | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), LRADC_DELAY(3)); mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1); /* * after changing the touchscreen plates setting * the signals need some initial time to settle. Start the * SoC's delay unit and start the conversion later * and automatically. 
*/ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */ LRADC_DELAY_TRIGGER_DELAYS(BIT(3)) | /* trigger DELAY unit#3 */ LRADC_DELAY_KICK | LRADC_DELAY_DELAY(lradc->settling_delay), LRADC_DELAY(2)); } /* * Pressure detection is special: * We want to do both required measurements for the pressure detection in * one turn. Use the hardware features to chain both conversions and let the * hardware report one interrupt if both conversions are done */ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1, unsigned ch2) { u32 reg; /* * prepare for oversampling conversion * * from the datasheet: * "The ACCUMULATE bit in the appropriate channel register * HW_LRADC_CHn must be set to 1 if NUM_SAMPLES is greater then 0; * otherwise, the IRQs will not fire." */ reg = LRADC_CH_ACCUMULATE | LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1); mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch1)); mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch2)); /* from the datasheet: * "Software must clear this register in preparation for a * multi-cycle accumulation. */ mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch1)); mxs_lradc_reg_clear(lradc, LRADC_CH_VALUE_MASK, LRADC_CH(ch2)); /* prepare the delay/loop unit according to the oversampling count */ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << ch1) | LRADC_DELAY_TRIGGER(1 << ch2) | /* start both channels */ LRADC_DELAY_TRIGGER_DELAYS(0) | LRADC_DELAY_LOOP(lradc->over_sample_cnt - 1) | LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), LRADC_DELAY(3)); mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1); /* * after changing the touchscreen plates setting * the signals need some initial time to settle. Start the * SoC's delay unit and start the conversion later * and automatically. 
*/ mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(0) | /* don't trigger ADC */ LRADC_DELAY_TRIGGER_DELAYS(BIT(3)) | /* trigger DELAY unit#3 */ LRADC_DELAY_KICK | LRADC_DELAY_DELAY(lradc->settling_delay), LRADC_DELAY(2)); } static unsigned mxs_lradc_read_raw_channel(struct mxs_lradc *lradc, unsigned channel) { u32 reg; unsigned num_samples, val; reg = readl(lradc->base + LRADC_CH(channel)); if (reg & LRADC_CH_ACCUMULATE) num_samples = lradc->over_sample_cnt; else num_samples = 1; val = (reg & LRADC_CH_VALUE_MASK) >> LRADC_CH_VALUE_OFFSET; return val / num_samples; } static unsigned mxs_lradc_read_ts_pressure(struct mxs_lradc *lradc, unsigned ch1, unsigned ch2) { u32 reg, mask; unsigned pressure, m1, m2; mask = LRADC_CTRL1_LRADC_IRQ(ch1) | LRADC_CTRL1_LRADC_IRQ(ch2); reg = readl(lradc->base + LRADC_CTRL1) & mask; while (reg != mask) { reg = readl(lradc->base + LRADC_CTRL1) & mask; dev_dbg(lradc->dev, "One channel is still busy: %X\n", reg); } m1 = mxs_lradc_read_raw_channel(lradc, ch1); m2 = mxs_lradc_read_raw_channel(lradc, ch2); if (m2 == 0) { dev_warn(lradc->dev, "Cannot calculate pressure\n"); return 1 << (LRADC_RESOLUTION - 1); } /* simply scale the value from 0 ... 
max ADC resolution */ pressure = m1; pressure *= (1 << LRADC_RESOLUTION); pressure /= m2; dev_dbg(lradc->dev, "Pressure = %u\n", pressure); return pressure; } #define TS_CH_XP 2 #define TS_CH_YP 3 #define TS_CH_XM 4 #define TS_CH_YM 5 /* * YP(open)--+-------------+ * | |--+ * | | | * YM(-)--+-------------+ | * +--------------+ * | | * XP(weak+) XM(open) * * "weak+" means 200k Ohm VDDIO * (-) means GND */ static void mxs_lradc_setup_touch_detection(struct mxs_lradc *lradc) { /* * In order to detect a touch event the 'touch detect enable' bit * enables: * - a weak pullup to the X+ connector * - a strong ground at the Y- connector */ mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); mxs_lradc_reg_set(lradc, mxs_lradc_touch_detect_bit(lradc), LRADC_CTRL0); } /* * YP(meas)--+-------------+ * | |--+ * | | | * YM(open)--+-------------+ | * +--------------+ * | | * XP(+) XM(-) * * (+) means here 1.85 V * (-) means here GND */ static void mxs_lradc_prepare_x_pos(struct mxs_lradc *lradc) { mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0); lradc->cur_plate = LRADC_SAMPLE_X; mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP); mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); } /* * YP(+)--+-------------+ * | |--+ * | | | * YM(-)--+-------------+ | * +--------------+ * | | * XP(open) XM(meas) * * (+) means here 1.85 V * (-) means here GND */ static void mxs_lradc_prepare_y_pos(struct mxs_lradc *lradc) { mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0); lradc->cur_plate = LRADC_SAMPLE_Y; mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM); mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); } /* * YP(+)--+-------------+ * | |--+ * | | | * YM(meas)--+-------------+ | * +--------------+ * | | * XP(meas) XM(-) * * (+) means here 1.85 V * (-) means 
here GND */ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc) { mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0); lradc->cur_plate = LRADC_SAMPLE_PRESSURE; mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM); mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP); mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2, TOUCHSCREEN_VCHANNEL1); } static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) { mxs_lradc_setup_touch_detection(lradc); lradc->cur_plate = LRADC_TOUCH; mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ | LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); } static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc) { mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); /* * start with the Y-pos, because it uses nearly the same plate * settings like the touch detection */ mxs_lradc_prepare_y_pos(lradc); } static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc) { input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos); input_report_abs(lradc->ts_input, ABS_Y, lradc->ts_y_pos); input_report_abs(lradc->ts_input, ABS_PRESSURE, lradc->ts_pressure); input_report_key(lradc->ts_input, BTN_TOUCH, 1); input_sync(lradc->ts_input); } static void mxs_lradc_complete_touch_event(struct mxs_lradc *lradc) { mxs_lradc_setup_touch_detection(lradc); lradc->cur_plate = LRADC_SAMPLE_VALID; /* * start a dummy conversion to burn time to settle the signals * note: we are not interested in the conversion's value */ mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1)); mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); mxs_lradc_reg_wrt(lradc, 
LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) | LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */ LRADC_DELAY(2)); } /* * in order to avoid false measurements, report only samples where * the surface is still touched after the position measurement */ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid) { /* if it is still touched, report the sample */ if (valid && mxs_lradc_check_touch_event(lradc)) { lradc->ts_valid = true; mxs_lradc_report_ts_event(lradc); } /* if it is even still touched, continue with the next measurement */ if (mxs_lradc_check_touch_event(lradc)) { mxs_lradc_prepare_y_pos(lradc); return; } if (lradc->ts_valid) { /* signal the release */ lradc->ts_valid = false; input_report_key(lradc->ts_input, BTN_TOUCH, 0); input_sync(lradc->ts_input); } /* if it is released, wait for the next touch via IRQ */ lradc->cur_plate = LRADC_TOUCH; mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); } /* touchscreen's state machine */ static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) { switch (lradc->cur_plate) { case LRADC_TOUCH: if (mxs_lradc_check_touch_event(lradc)) mxs_lradc_start_touch_event(lradc); mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1); return; case LRADC_SAMPLE_Y: lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc, TOUCHSCREEN_VCHANNEL1); mxs_lradc_prepare_x_pos(lradc); return; case LRADC_SAMPLE_X: lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc, TOUCHSCREEN_VCHANNEL1); mxs_lradc_prepare_pressure(lradc); return; case LRADC_SAMPLE_PRESSURE: lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2, TOUCHSCREEN_VCHANNEL1); mxs_lradc_complete_touch_event(lradc); return; case 
LRADC_SAMPLE_VALID: mxs_lradc_finish_touch_event(lradc, 1); break; } } /* * Raw I/O operations */ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val) { struct mxs_lradc *lradc = iio_priv(iio_dev); int ret; /* * See if there is no buffered operation in progess. If there is, simply * bail out. This can be improved to support both buffered and raw IO at * the same time, yet the code becomes horribly complicated. Therefore I * applied KISS principle here. */ ret = mutex_trylock(&lradc->lock); if (!ret) return -EBUSY; reinit_completion(&lradc->completion); /* * No buffered operation in progress, map the channel and trigger it. * Virtual channel 0 is always used here as the others are always not * used if doing raw sampling. */ if (lradc->soc == IMX28_LRADC) mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1); mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0); /* Enable / disable the divider per requirement */ if (test_bit(chan, &lradc->is_divided)) mxs_lradc_reg_set(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2); else mxs_lradc_reg_clear(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2); /* Clean the slot's previous content, then set new one. */ mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0), LRADC_CTRL4); mxs_lradc_reg_set(lradc, chan, LRADC_CTRL4); mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(0)); /* Enable the IRQ and start sampling the channel. */ mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1); mxs_lradc_reg_set(lradc, BIT(0), LRADC_CTRL0); /* Wait for completion on the channel, 1 second max. */ ret = wait_for_completion_killable_timeout(&lradc->completion, HZ); if (!ret) ret = -ETIMEDOUT; if (ret < 0) goto err; /* Read the data. 
*/ *val = readl(lradc->base + LRADC_CH(0)) & LRADC_CH_VALUE_MASK; ret = IIO_VAL_INT; err: mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1); mutex_unlock(&lradc->lock); return ret; } static int mxs_lradc_read_temp(struct iio_dev *iio_dev, int *val) { int ret, min, max; ret = mxs_lradc_read_single(iio_dev, 8, &min); if (ret != IIO_VAL_INT) return ret; ret = mxs_lradc_read_single(iio_dev, 9, &max); if (ret != IIO_VAL_INT) return ret; *val = max - min; return IIO_VAL_INT; } static int mxs_lradc_read_raw(struct iio_dev *iio_dev, const struct iio_chan_spec *chan, int *val, int *val2, long m) { struct mxs_lradc *lradc = iio_priv(iio_dev); switch (m) { case IIO_CHAN_INFO_RAW: if (chan->type == IIO_TEMP) return mxs_lradc_read_temp(iio_dev, val); return mxs_lradc_read_single(iio_dev, chan->channel, val); case IIO_CHAN_INFO_SCALE: if (chan->type == IIO_TEMP) { /* From the datasheet, we have to multiply by 1.012 and * divide by 4 */ *val = 0; *val2 = 253000; return IIO_VAL_INT_PLUS_MICRO; } *val = lradc->vref_mv[chan->channel]; *val2 = chan->scan_type.realbits - test_bit(chan->channel, &lradc->is_divided); return IIO_VAL_FRACTIONAL_LOG2; case IIO_CHAN_INFO_OFFSET: if (chan->type == IIO_TEMP) { /* The calculated value from the ADC is in Kelvin, we * want Celsius for hwmon so the offset is * -272.15 * scale */ *val = -1075; *val2 = 691699; return IIO_VAL_INT_PLUS_MICRO; } return -EINVAL; default: break; } return -EINVAL; } static int mxs_lradc_write_raw(struct iio_dev *iio_dev, const struct iio_chan_spec *chan, int val, int val2, long m) { struct mxs_lradc *lradc = iio_priv(iio_dev); struct mxs_lradc_scale *scale_avail = lradc->scale_avail[chan->channel]; int ret; ret = mutex_trylock(&lradc->lock); if (!ret) return -EBUSY; switch (m) { case IIO_CHAN_INFO_SCALE: ret = -EINVAL; if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer && val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) { /* divider by two disabled */ clear_bit(chan->channel, &lradc->is_divided); 
ret = 0; } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer && val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) { /* divider by two enabled */ set_bit(chan->channel, &lradc->is_divided); ret = 0; } break; default: ret = -EINVAL; break; } mutex_unlock(&lradc->lock); return ret; } static int mxs_lradc_write_raw_get_fmt(struct iio_dev *iio_dev, const struct iio_chan_spec *chan, long m) { return IIO_VAL_INT_PLUS_NANO; } static ssize_t mxs_lradc_show_scale_available_ch(struct device *dev, struct device_attribute *attr, char *buf, int ch) { struct iio_dev *iio = dev_to_iio_dev(dev); struct mxs_lradc *lradc = iio_priv(iio); int i, len = 0; for (i = 0; i < ARRAY_SIZE(lradc->scale_avail[ch]); i++) len += sprintf(buf + len, "%u.%09u ", lradc->scale_avail[ch][i].integer, lradc->scale_avail[ch][i].nano); len += sprintf(buf + len, "\n"); return len; } static ssize_t mxs_lradc_show_scale_available(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev_attr *iio_attr = to_iio_dev_attr(attr); return mxs_lradc_show_scale_available_ch(dev, attr, buf, iio_attr->address); } #define SHOW_SCALE_AVAILABLE_ATTR(ch) \ static IIO_DEVICE_ATTR(in_voltage##ch##_scale_available, S_IRUGO, \ mxs_lradc_show_scale_available, NULL, ch) SHOW_SCALE_AVAILABLE_ATTR(0); SHOW_SCALE_AVAILABLE_ATTR(1); SHOW_SCALE_AVAILABLE_ATTR(2); SHOW_SCALE_AVAILABLE_ATTR(3); SHOW_SCALE_AVAILABLE_ATTR(4); SHOW_SCALE_AVAILABLE_ATTR(5); SHOW_SCALE_AVAILABLE_ATTR(6); SHOW_SCALE_AVAILABLE_ATTR(7); SHOW_SCALE_AVAILABLE_ATTR(10); SHOW_SCALE_AVAILABLE_ATTR(11); SHOW_SCALE_AVAILABLE_ATTR(12); SHOW_SCALE_AVAILABLE_ATTR(13); SHOW_SCALE_AVAILABLE_ATTR(14); SHOW_SCALE_AVAILABLE_ATTR(15); static struct attribute *mxs_lradc_attributes[] = { &iio_dev_attr_in_voltage0_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage1_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage2_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage3_scale_available.dev_attr.attr, 
&iio_dev_attr_in_voltage4_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage5_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage6_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage7_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage10_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage11_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage12_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage13_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage14_scale_available.dev_attr.attr, &iio_dev_attr_in_voltage15_scale_available.dev_attr.attr, NULL }; static const struct attribute_group mxs_lradc_attribute_group = { .attrs = mxs_lradc_attributes, }; static const struct iio_info mxs_lradc_iio_info = { .driver_module = THIS_MODULE, .read_raw = mxs_lradc_read_raw, .write_raw = mxs_lradc_write_raw, .write_raw_get_fmt = mxs_lradc_write_raw_get_fmt, .attrs = &mxs_lradc_attribute_group, }; static int mxs_lradc_ts_open(struct input_dev *dev) { struct mxs_lradc *lradc = input_get_drvdata(dev); /* Enable the touch-detect circuitry. */ mxs_lradc_enable_touch_detection(lradc); return 0; } static void mxs_lradc_disable_ts(struct mxs_lradc *lradc) { /* stop all interrupts from firing */ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); /* Power-down touchscreen touch-detect circuitry. 
*/ mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); } static void mxs_lradc_ts_close(struct input_dev *dev) { struct mxs_lradc *lradc = input_get_drvdata(dev); mxs_lradc_disable_ts(lradc); } static int mxs_lradc_ts_register(struct mxs_lradc *lradc) { struct input_dev *input; struct device *dev = lradc->dev; int ret; if (!lradc->use_touchscreen) return 0; input = input_allocate_device(); if (!input) return -ENOMEM; input->name = DRIVER_NAME; input->id.bustype = BUS_HOST; input->dev.parent = dev; input->open = mxs_lradc_ts_open; input->close = mxs_lradc_ts_close; __set_bit(EV_ABS, input->evbit); __set_bit(EV_KEY, input->evbit); __set_bit(BTN_TOUCH, input->keybit); input_set_abs_params(input, ABS_X, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0); input_set_abs_params(input, ABS_Y, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0); input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0); lradc->ts_input = input; input_set_drvdata(input, lradc); ret = input_register_device(input); if (ret) input_free_device(lradc->ts_input); return ret; } static void mxs_lradc_ts_unregister(struct mxs_lradc *lradc) { if (!lradc->use_touchscreen) return; mxs_lradc_disable_ts(lradc); input_unregister_device(lradc->ts_input); } /* * IRQ Handling */ static irqreturn_t mxs_lradc_handle_irq(int irq, void *data) { struct iio_dev *iio = data; struct mxs_lradc *lradc = iio_priv(iio); unsigned long reg = readl(lradc->base + LRADC_CTRL1); uint32_t clr_irq = mxs_lradc_irq_mask(lradc); const uint32_t ts_irq_mask = LRADC_CTRL1_TOUCH_DETECT_IRQ | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2); if (!(reg & mxs_lradc_irq_mask(lradc))) return IRQ_NONE; if (lradc->use_touchscreen && (reg & ts_irq_mask)) { mxs_lradc_handle_touch(lradc); /* Make sure we don't clear the next conversion's interrupt. 
*/ clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2)); } if (iio_buffer_enabled(iio)) { if (reg & lradc->buffer_vchans) iio_trigger_poll(iio->trig); } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) { complete(&lradc->completion); } mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1); return IRQ_HANDLED; } /* * Trigger handling */ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *iio = pf->indio_dev; struct mxs_lradc *lradc = iio_priv(iio); const uint32_t chan_value = LRADC_CH_ACCUMULATE | ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); unsigned int i, j = 0; for_each_set_bit(i, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { lradc->buffer[j] = readl(lradc->base + LRADC_CH(j)); mxs_lradc_reg_wrt(lradc, chan_value, LRADC_CH(j)); lradc->buffer[j] &= LRADC_CH_VALUE_MASK; lradc->buffer[j] /= LRADC_DELAY_TIMER_LOOP; j++; } iio_push_to_buffers_with_timestamp(iio, lradc->buffer, pf->timestamp); iio_trigger_notify_done(iio->trig); return IRQ_HANDLED; } static int mxs_lradc_configure_trigger(struct iio_trigger *trig, bool state) { struct iio_dev *iio = iio_trigger_get_drvdata(trig); struct mxs_lradc *lradc = iio_priv(iio); const uint32_t st = state ? 
STMP_OFFSET_REG_SET : STMP_OFFSET_REG_CLR; mxs_lradc_reg_wrt(lradc, LRADC_DELAY_KICK, LRADC_DELAY(0) + st); return 0; } static const struct iio_trigger_ops mxs_lradc_trigger_ops = { .owner = THIS_MODULE, .set_trigger_state = &mxs_lradc_configure_trigger, }; static int mxs_lradc_trigger_init(struct iio_dev *iio) { int ret; struct iio_trigger *trig; struct mxs_lradc *lradc = iio_priv(iio); trig = iio_trigger_alloc("%s-dev%i", iio->name, iio->id); if (trig == NULL) return -ENOMEM; trig->dev.parent = lradc->dev; iio_trigger_set_drvdata(trig, iio); trig->ops = &mxs_lradc_trigger_ops; ret = iio_trigger_register(trig); if (ret) { iio_trigger_free(trig); return ret; } lradc->trig = trig; return 0; } static void mxs_lradc_trigger_remove(struct iio_dev *iio) { struct mxs_lradc *lradc = iio_priv(iio); iio_trigger_unregister(lradc->trig); iio_trigger_free(lradc->trig); } static int mxs_lradc_buffer_preenable(struct iio_dev *iio) { struct mxs_lradc *lradc = iio_priv(iio); int ret = 0, chan, ofs = 0; unsigned long enable = 0; uint32_t ctrl4_set = 0; uint32_t ctrl4_clr = 0; uint32_t ctrl1_irq = 0; const uint32_t chan_value = LRADC_CH_ACCUMULATE | ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS); if (!len) return -EINVAL; /* * Lock the driver so raw access can not be done during buffered * operation. This simplifies the code a lot. 
*/ ret = mutex_trylock(&lradc->lock); if (!ret) return -EBUSY; lradc->buffer = kmalloc_array(len, sizeof(*lradc->buffer), GFP_KERNEL); if (!lradc->buffer) { ret = -ENOMEM; goto err_mem; } if (lradc->soc == IMX28_LRADC) mxs_lradc_reg_clear(lradc, lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, LRADC_CTRL1); mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs); ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs); mxs_lradc_reg_wrt(lradc, chan_value, LRADC_CH(ofs)); bitmap_set(&enable, ofs, 1); ofs++; } mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | LRADC_DELAY_KICK, LRADC_DELAY(0)); mxs_lradc_reg_clear(lradc, ctrl4_clr, LRADC_CTRL4); mxs_lradc_reg_set(lradc, ctrl4_set, LRADC_CTRL4); mxs_lradc_reg_set(lradc, ctrl1_irq, LRADC_CTRL1); mxs_lradc_reg_set(lradc, enable << LRADC_DELAY_TRIGGER_LRADCS_OFFSET, LRADC_DELAY(0)); return 0; err_mem: mutex_unlock(&lradc->lock); return ret; } static int mxs_lradc_buffer_postdisable(struct iio_dev *iio) { struct mxs_lradc *lradc = iio_priv(iio); mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | LRADC_DELAY_KICK, LRADC_DELAY(0)); mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); if (lradc->soc == IMX28_LRADC) mxs_lradc_reg_clear(lradc, lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, LRADC_CTRL1); kfree(lradc->buffer); mutex_unlock(&lradc->lock); return 0; } static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio, const unsigned long *mask) { struct mxs_lradc *lradc = iio_priv(iio); const int map_chans = bitmap_weight(mask, LRADC_MAX_TOTAL_CHANS); int rsvd_chans = 0; unsigned long rsvd_mask = 0; if (lradc->use_touchbutton) rsvd_mask |= CHAN_MASK_TOUCHBUTTON; if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_4WIRE) rsvd_mask |= CHAN_MASK_TOUCHSCREEN_4WIRE; if (lradc->use_touchscreen == 
MXS_LRADC_TOUCHSCREEN_5WIRE) rsvd_mask |= CHAN_MASK_TOUCHSCREEN_5WIRE; if (lradc->use_touchbutton) rsvd_chans++; if (lradc->use_touchscreen) rsvd_chans += 2; /* Test for attempts to map channels with special mode of operation. */ if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS)) return false; /* Test for attempts to map more channels then available slots. */ if (map_chans + rsvd_chans > LRADC_MAX_MAPPED_CHANS) return false; return true; } static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = { .preenable = &mxs_lradc_buffer_preenable, .postenable = &iio_triggered_buffer_postenable, .predisable = &iio_triggered_buffer_predisable, .postdisable = &mxs_lradc_buffer_postdisable, .validate_scan_mask = &mxs_lradc_validate_scan_mask, }; /* * Driver initialization */ #define MXS_ADC_CHAN(idx, chan_type) { \ .type = (chan_type), \ .indexed = 1, \ .scan_index = (idx), \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ .channel = (idx), \ .address = (idx), \ .scan_type = { \ .sign = 'u', \ .realbits = LRADC_RESOLUTION, \ .storagebits = 32, \ }, \ } static const struct iio_chan_spec mxs_lradc_chan_spec[] = { MXS_ADC_CHAN(0, IIO_VOLTAGE), MXS_ADC_CHAN(1, IIO_VOLTAGE), MXS_ADC_CHAN(2, IIO_VOLTAGE), MXS_ADC_CHAN(3, IIO_VOLTAGE), MXS_ADC_CHAN(4, IIO_VOLTAGE), MXS_ADC_CHAN(5, IIO_VOLTAGE), MXS_ADC_CHAN(6, IIO_VOLTAGE), MXS_ADC_CHAN(7, IIO_VOLTAGE), /* VBATT */ /* Combined Temperature sensors */ { .type = IIO_TEMP, .indexed = 1, .scan_index = 8, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE), .channel = 8, .scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,}, }, /* Hidden channel to keep indexes */ { .type = IIO_TEMP, .indexed = 1, .scan_index = -1, .channel = 9, }, MXS_ADC_CHAN(10, IIO_VOLTAGE), /* VDDIO */ MXS_ADC_CHAN(11, IIO_VOLTAGE), /* VTH */ MXS_ADC_CHAN(12, IIO_VOLTAGE), /* VDDA */ MXS_ADC_CHAN(13, IIO_VOLTAGE), /* VDDD */ MXS_ADC_CHAN(14, IIO_VOLTAGE), /* VBG */ 
MXS_ADC_CHAN(15, IIO_VOLTAGE), /* VDD5V */ }; static int mxs_lradc_hw_init(struct mxs_lradc *lradc) { /* The ADC always uses DELAY CHANNEL 0. */ const uint32_t adc_cfg = (1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) | (LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET); int ret = stmp_reset_block(lradc->base); if (ret) return ret; /* Configure DELAY CHANNEL 0 for generic ADC sampling. */ mxs_lradc_reg_wrt(lradc, adc_cfg, LRADC_DELAY(0)); /* Disable remaining DELAY CHANNELs */ mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(1)); mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); /* Configure the touchscreen type */ if (lradc->soc == IMX28_LRADC) { mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE, LRADC_CTRL0); if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE) mxs_lradc_reg_set(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE, LRADC_CTRL0); } /* Start internal temperature sensing. */ mxs_lradc_reg_wrt(lradc, 0, LRADC_CTRL2); return 0; } static void mxs_lradc_hw_stop(struct mxs_lradc *lradc) { int i; mxs_lradc_reg_clear(lradc, mxs_lradc_irq_en_mask(lradc), LRADC_CTRL1); for (i = 0; i < LRADC_MAX_DELAY_CHANS; i++) mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(i)); } static const struct of_device_id mxs_lradc_dt_ids[] = { { .compatible = "fsl,imx23-lradc", .data = (void *)IMX23_LRADC, }, { .compatible = "fsl,imx28-lradc", .data = (void *)IMX28_LRADC, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxs_lradc_dt_ids); static int mxs_lradc_probe_touchscreen(struct mxs_lradc *lradc, struct device_node *lradc_node) { int ret; u32 ts_wires = 0, adapt; ret = of_property_read_u32(lradc_node, "fsl,lradc-touchscreen-wires", &ts_wires); if (ret) return -ENODEV; /* touchscreen feature disabled */ switch (ts_wires) { case 4: lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_4WIRE; break; case 5: if (lradc->soc == IMX28_LRADC) { lradc->use_touchscreen = MXS_LRADC_TOUCHSCREEN_5WIRE; break; } /* fall through an error message for i.MX23 */ default: 
dev_err(lradc->dev, "Unsupported number of touchscreen wires (%d)\n", ts_wires); return -EINVAL; } if (of_property_read_u32(lradc_node, "fsl,ave-ctrl", &adapt)) { lradc->over_sample_cnt = 4; } else { if (adapt < 1 || adapt > 32) { dev_err(lradc->dev, "Invalid sample count (%u)\n", adapt); return -EINVAL; } lradc->over_sample_cnt = adapt; } if (of_property_read_u32(lradc_node, "fsl,ave-delay", &adapt)) { lradc->over_sample_delay = 2; } else { if (adapt < 2 || adapt > LRADC_DELAY_DELAY_MASK + 1) { dev_err(lradc->dev, "Invalid sample delay (%u)\n", adapt); return -EINVAL; } lradc->over_sample_delay = adapt; } if (of_property_read_u32(lradc_node, "fsl,settling", &adapt)) { lradc->settling_delay = 10; } else { if (adapt < 1 || adapt > LRADC_DELAY_DELAY_MASK) { dev_err(lradc->dev, "Invalid settling delay (%u)\n", adapt); return -EINVAL; } lradc->settling_delay = adapt; } return 0; } static int mxs_lradc_probe(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(mxs_lradc_dt_ids, &pdev->dev); const struct mxs_lradc_of_config *of_cfg = &mxs_lradc_of_config[(enum mxs_lradc_id)of_id->data]; struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct mxs_lradc *lradc; struct iio_dev *iio; struct resource *iores; int ret = 0, touch_ret; int i, s; uint64_t scale_uv; /* Allocate the IIO device. 
*/ iio = devm_iio_device_alloc(dev, sizeof(*lradc)); if (!iio) { dev_err(dev, "Failed to allocate IIO device\n"); return -ENOMEM; } lradc = iio_priv(iio); lradc->soc = (enum mxs_lradc_id)of_id->data; /* Grab the memory area */ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); lradc->dev = &pdev->dev; lradc->base = devm_ioremap_resource(dev, iores); if (IS_ERR(lradc->base)) return PTR_ERR(lradc->base); lradc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(lradc->clk)) { dev_err(dev, "Failed to get the delay unit clock\n"); return PTR_ERR(lradc->clk); } ret = clk_prepare_enable(lradc->clk); if (ret != 0) { dev_err(dev, "Failed to enable the delay unit clock\n"); return ret; } touch_ret = mxs_lradc_probe_touchscreen(lradc, node); if (touch_ret == 0) lradc->buffer_vchans = BUFFER_VCHANS_LIMITED; else lradc->buffer_vchans = BUFFER_VCHANS_ALL; /* Grab all IRQ sources */ for (i = 0; i < of_cfg->irq_count; i++) { lradc->irq[i] = platform_get_irq(pdev, i); if (lradc->irq[i] < 0) { ret = lradc->irq[i]; goto err_clk; } ret = devm_request_irq(dev, lradc->irq[i], mxs_lradc_handle_irq, 0, of_cfg->irq_name[i], iio); if (ret) goto err_clk; } lradc->vref_mv = of_cfg->vref_mv; platform_set_drvdata(pdev, iio); init_completion(&lradc->completion); mutex_init(&lradc->lock); iio->name = pdev->name; iio->dev.parent = &pdev->dev; iio->info = &mxs_lradc_iio_info; iio->modes = INDIO_DIRECT_MODE; iio->channels = mxs_lradc_chan_spec; iio->num_channels = ARRAY_SIZE(mxs_lradc_chan_spec); iio->masklength = LRADC_MAX_TOTAL_CHANS; ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time, &mxs_lradc_trigger_handler, &mxs_lradc_buffer_ops); if (ret) goto err_clk; ret = mxs_lradc_trigger_init(iio); if (ret) goto err_trig; /* Populate available ADC input ranges */ for (i = 0; i < LRADC_MAX_TOTAL_CHANS; i++) { for (s = 0; s < ARRAY_SIZE(lradc->scale_avail[i]); s++) { /* * [s=0] = optional divider by two disabled (default) * [s=1] = optional divider by two enabled * * The scale is 
calculated by doing: * Vref >> (realbits - s) * which multiplies by two on the second component * of the array. */ scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >> (LRADC_RESOLUTION - s); lradc->scale_avail[i][s].nano = do_div(scale_uv, 100000000) * 10; lradc->scale_avail[i][s].integer = scale_uv; } } /* Configure the hardware. */ ret = mxs_lradc_hw_init(lradc); if (ret) goto err_dev; /* Register the touchscreen input device. */ if (touch_ret == 0) { ret = mxs_lradc_ts_register(lradc); if (ret) goto err_ts_register; } /* Register IIO device. */ ret = iio_device_register(iio); if (ret) { dev_err(dev, "Failed to register IIO device\n"); goto err_ts; } return 0; err_ts: mxs_lradc_ts_unregister(lradc); err_ts_register: mxs_lradc_hw_stop(lradc); err_dev: mxs_lradc_trigger_remove(iio); err_trig: iio_triggered_buffer_cleanup(iio); err_clk: clk_disable_unprepare(lradc->clk); return ret; } static int mxs_lradc_remove(struct platform_device *pdev) { struct iio_dev *iio = platform_get_drvdata(pdev); struct mxs_lradc *lradc = iio_priv(iio); iio_device_unregister(iio); mxs_lradc_ts_unregister(lradc); mxs_lradc_hw_stop(lradc); mxs_lradc_trigger_remove(iio); iio_triggered_buffer_cleanup(iio); clk_disable_unprepare(lradc->clk); return 0; } static struct platform_driver mxs_lradc_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = mxs_lradc_dt_ids, }, .probe = mxs_lradc_probe, .remove = mxs_lradc_remove, }; module_platform_driver(mxs_lradc_driver); MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); MODULE_DESCRIPTION("Freescale i.MX28 LRADC driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
xerpi/linux
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
350
4118
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

/*
 * GK104 IBUS (internal bus) error-interrupt handling.
 *
 * Each of the three blocks below reads a per-unit trio of registers
 * (address, data, status) at a fixed base plus a 0x0800 stride per unit
 * index, logs them, and then clears bit 9 (0x00000200) in the status
 * register.  NOTE(review): bit 9 is presumably the interrupt-pending /
 * acknowledge bit — confirm against NVIDIA register documentation.
 */

/* Log and acknowledge an IBUS fault reported by HUB unit @i. */
static void
gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
{
	struct nvkm_device *device = ibus->device;
	u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
	nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
	nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
}

/* Log and acknowledge an IBUS fault reported by ROP unit @i. */
static void
gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
{
	struct nvkm_device *device = ibus->device;
	u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
	nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
	nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
}

/* Log and acknowledge an IBUS fault reported by GPC unit @i. */
static void
gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
{
	struct nvkm_device *device = ibus->device;
	u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
	nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
	nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
}

/*
 * Top-level IBUS interrupt dispatcher.
 *
 * Reads the two pending-interrupt words (0x120058 covers HUB units in
 * bits 8..15 and ROP units in bits 16..31; 0x12005c covers GPC units,
 * one bit per unit starting at bit 0) and the per-type unit counts, and
 * dispatches each pending bit to the matching per-unit handler above.
 * Each loop also terminates early once no relevant bits remain set.
 */
void
gk104_ibus_intr(struct nvkm_subdev *ibus)
{
	struct nvkm_device *device = ibus->device;
	u32 intr0 = nvkm_rd32(device, 0x120058);
	u32 intr1 = nvkm_rd32(device, 0x12005c);
	u32 hubnr = nvkm_rd32(device, 0x120070);
	u32 ropnr = nvkm_rd32(device, 0x120074);
	u32 gpcnr = nvkm_rd32(device, 0x120078);
	u32 i;

	/* HUB interrupts live in bits 8..15 of intr0. */
	for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
		u32 stat = 0x00000100 << i;
		if (intr0 & stat) {
			gk104_ibus_intr_hub(ibus, i);
			intr0 &= ~stat;
		}
	}

	/* ROP interrupts live in bits 16..31 of intr0. */
	for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
		u32 stat = 0x00010000 << i;
		if (intr0 & stat) {
			gk104_ibus_intr_rop(ibus, i);
			intr0 &= ~stat;
		}
	}

	/* GPC interrupts: one bit per GPC in intr1, starting at bit 0. */
	for (i = 0; intr1 && i < gpcnr; i++) {
		u32 stat = 0x00000001 << i;
		if (intr1 & stat) {
			gk104_ibus_intr_gpc(ibus, i);
			intr1 &= ~stat;
		}
	}
}

/*
 * One-time IBUS register setup, run at both preinit and init (see the
 * func table below).
 *
 * NOTE(review): these are opaque hardware tuning values taken from
 * NVIDIA's binary driver traces; the exact write order matters and the
 * register at 0x122348 is intentionally written twice with different
 * values — do not "deduplicate" without hardware documentation.
 */
static int
gk104_ibus_init(struct nvkm_subdev *ibus)
{
	struct nvkm_device *device = ibus->device;
	nvkm_mask(device, 0x122318, 0x0003ffff, 0x00001000);
	nvkm_mask(device, 0x12231c, 0x0003ffff, 0x00000200);
	nvkm_mask(device, 0x122310, 0x0003ffff, 0x00000800);
	nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000100);
	nvkm_mask(device, 0x1223b0, 0x0003ffff, 0x00000fff);
	nvkm_mask(device, 0x122348, 0x0003ffff, 0x00000200);
	nvkm_mask(device, 0x122358, 0x0003ffff, 0x00002880);
	return 0;
}

/* Subdev hooks: the same register setup runs for preinit and init. */
static const struct nvkm_subdev_func
gk104_ibus = {
	.preinit = gk104_ibus_init,
	.init = gk104_ibus_init,
	.intr = gk104_ibus_intr,
};

/*
 * Constructor: allocate a bare nvkm_subdev for the GK104 IBUS and wire
 * up the function table.  Returns 0 on success or -ENOMEM; on success
 * *pibus owns the allocation (freed by the subdev core on destruction).
 */
int
gk104_ibus_new(struct nvkm_device *device, int index,
	       struct nvkm_subdev **pibus)
{
	struct nvkm_subdev *ibus;
	if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&gk104_ibus, device, index, ibus);
	return 0;
}
gpl-2.0
ajopanoor/mic_host_os
drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
606
4473
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for introduction
 *
 * Author: liang@whamcloud.com
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "../../include/linux/libcfs/libcfs.h"

/** Global CPU partition table */
struct cfs_cpt_table *cfs_cpt_table __read_mostly;
EXPORT_SYMBOL(cfs_cpt_table);

/*
 * Everything below is the fallback ("uniprocessor") implementation of
 * the CPU-partition-table (CPT) API, compiled only when the platform
 * does not provide a real one (HAVE_LIBCFS_CPT undefined).  It models
 * the whole machine as a single partition (cpt 0), so most operations
 * are no-ops or constant stubs that keep callers working unchanged.
 */
#ifndef HAVE_LIBCFS_CPT

#define CFS_CPU_VERSION_MAGIC 0xbabecafe

/*
 * Allocate a CPT table.  Only a single partition is supported here;
 * any other @ncpt is rejected with NULL.  The magic version stamp is
 * later asserted in cfs_cpt_table_free() to catch mismatched tables.
 */
struct cfs_cpt_table *
cfs_cpt_table_alloc(unsigned int ncpt)
{
	struct cfs_cpt_table *cptab;

	if (ncpt != 1) {
		CERROR("Can't support cpu partition number %d\n", ncpt);
		return NULL;
	}

	LIBCFS_ALLOC(cptab, sizeof(*cptab));
	if (cptab != NULL) {
		cptab->ctb_version = CFS_CPU_VERSION_MAGIC;
		cptab->ctb_nparts = ncpt;
	}

	return cptab;
}
EXPORT_SYMBOL(cfs_cpt_table_alloc);

/* Free a table previously returned by cfs_cpt_table_alloc(). */
void
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
	LASSERT(cptab->ctb_version == CFS_CPU_VERSION_MAGIC);

	LIBCFS_FREE(cptab, sizeof(*cptab));
}
EXPORT_SYMBOL(cfs_cpt_table_free);

#ifdef CONFIG_SMP
/*
 * Render the (single-partition) table into @buf as "0\t: 0\n".
 * Returns the number of characters written, or -EFBIG if @len is too
 * small to hold the output.
 */
int
cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
{
	int rc = 0;

	rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
	len -= rc;
	if (len <= 0)
		return -EFBIG;

	return rc;
}
EXPORT_SYMBOL(cfs_cpt_table_print);
#endif /* CONFIG_SMP */

/* There is always exactly one partition in the fallback table. */
int
cfs_cpt_number(struct cfs_cpt_table *cptab)
{
	return 1;
}
EXPORT_SYMBOL(cfs_cpt_number);

/* The single partition nominally contains one CPU. */
int
cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
{
	return 1;
}
EXPORT_SYMBOL(cfs_cpt_weight);

/* The single partition is always considered online. */
int
cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
{
	return 1;
}
EXPORT_SYMBOL(cfs_cpt_online);

/* Set/unset operations succeed trivially: there is nothing to track. */
int
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpu);

void
cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
}
EXPORT_SYMBOL(cfs_cpt_unset_cpu);

int
cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_cpumask);

void
cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
{
}
EXPORT_SYMBOL(cfs_cpt_unset_cpumask);

int
cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_node);

void
cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
{
}
EXPORT_SYMBOL(cfs_cpt_unset_node);

int
cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
	return 1;
}
EXPORT_SYMBOL(cfs_cpt_set_nodemask);

void
cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
{
}
EXPORT_SYMBOL(cfs_cpt_unset_nodemask);

void
cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
{
}
EXPORT_SYMBOL(cfs_cpt_clear);

/* All lookups map to partition/NUMA-node/CPU 0 in the fallback. */
int
cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
{
	return 0;
}
EXPORT_SYMBOL(cfs_cpt_spread_node);

/* No hyper-threading knowledge: report one sibling per CPU. */
int
cfs_cpu_ht_nsiblings(int cpu)
{
	return 1;
}
EXPORT_SYMBOL(cfs_cpu_ht_nsiblings);

int
cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
{
	return 0;
}
EXPORT_SYMBOL(cfs_cpt_current);

int
cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
{
	return 0;
}
EXPORT_SYMBOL(cfs_cpt_of_cpu);

/* Nothing to bind to; report success. */
int
cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
{
	return 0;
}
EXPORT_SYMBOL(cfs_cpt_bind);

/* Tear down the global table created by cfs_cpu_init(). */
void
cfs_cpu_fini(void)
{
	if (cfs_cpt_table != NULL) {
		cfs_cpt_table_free(cfs_cpt_table);
		cfs_cpt_table = NULL;
	}
}

/*
 * Create the global single-partition table.  Returns 0 on success,
 * -1 on allocation failure (callers only test for non-zero).
 */
int
cfs_cpu_init(void)
{
	cfs_cpt_table = cfs_cpt_table_alloc(1);

	return cfs_cpt_table != NULL ? 0 : -1;
}

#endif /* HAVE_LIBCFS_CPT */
gpl-2.0
mattkelly/linux-2.6-xlnx
drivers/rtc/rtc-ab3100.c
606
7373
/* * Copyright (C) 2007-2009 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * RTC clock driver for the AB3100 Analog Baseband Chip * Author: Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/rtc.h> #include <linux/mfd/ab3100.h> /* Clock rate in Hz */ #define AB3100_RTC_CLOCK_RATE 32768 /* * The AB3100 RTC registers. These are the same for * AB3000 and AB3100. * Control register: * Bit 0: RTC Monitor cleared=0, active=1, if you set it * to 1 it remains active until RTC power is lost. * Bit 1: 32 kHz Oscillator, 0 = on, 1 = bypass * Bit 2: Alarm on, 0 = off, 1 = on * Bit 3: 32 kHz buffer disabling, 0 = enabled, 1 = disabled */ #define AB3100_RTC 0x53 /* default setting, buffer disabled, alarm on */ #define RTC_SETTING 0x30 /* Alarm when AL0-AL3 == TI0-TI3 */ #define AB3100_AL0 0x56 #define AB3100_AL1 0x57 #define AB3100_AL2 0x58 #define AB3100_AL3 0x59 /* This 48-bit register that counts up at 32768 Hz */ #define AB3100_TI0 0x5a #define AB3100_TI1 0x5b #define AB3100_TI2 0x5c #define AB3100_TI3 0x5d #define AB3100_TI4 0x5e #define AB3100_TI5 0x5f /* * RTC clock functions and device struct declaration */ static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs) { struct ab3100 *ab3100_data = dev_get_drvdata(dev); u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2, AB3100_TI3, AB3100_TI4, AB3100_TI5}; unsigned char buf[6]; u64 fat_time = (u64) secs * AB3100_RTC_CLOCK_RATE * 2; int err = 0; int i; buf[0] = (fat_time) & 0xFF; buf[1] = (fat_time >> 8) & 0xFF; buf[2] = (fat_time >> 16) & 0xFF; buf[3] = (fat_time >> 24) & 0xFF; buf[4] = (fat_time >> 32) & 0xFF; buf[5] = (fat_time >> 40) & 0xFF; for (i = 0; i < 6; i++) { err = ab3100_set_register_interruptible(ab3100_data, regs[i], buf[i]); if (err) return err; } /* Set the flag to mark that the clock is now set */ return 
ab3100_mask_and_set_register_interruptible(ab3100_data, AB3100_RTC, 0xFE, 0x01); } static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct ab3100 *ab3100_data = dev_get_drvdata(dev); unsigned long time; u8 rtcval; int err; err = ab3100_get_register_interruptible(ab3100_data, AB3100_RTC, &rtcval); if (err) return err; if (!(rtcval & 0x01)) { dev_info(dev, "clock not set (lost power)"); return -EINVAL; } else { u64 fat_time; u8 buf[6]; /* Read out time registers */ err = ab3100_get_register_page_interruptible(ab3100_data, AB3100_TI0, buf, 6); if (err != 0) return err; fat_time = ((u64) buf[5] << 40) | ((u64) buf[4] << 32) | ((u64) buf[3] << 24) | ((u64) buf[2] << 16) | ((u64) buf[1] << 8) | (u64) buf[0]; time = (unsigned long) (fat_time / (u64) (AB3100_RTC_CLOCK_RATE * 2)); } rtc_time_to_tm(time, tm); return rtc_valid_tm(tm); } static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct ab3100 *ab3100_data = dev_get_drvdata(dev); unsigned long time; u64 fat_time; u8 buf[6]; u8 rtcval; int err; /* Figure out if alarm is enabled or not */ err = ab3100_get_register_interruptible(ab3100_data, AB3100_RTC, &rtcval); if (err) return err; if (rtcval & 0x04) alarm->enabled = 1; else alarm->enabled = 0; /* No idea how this could be represented */ alarm->pending = 0; /* Read out alarm registers, only 4 bytes */ err = ab3100_get_register_page_interruptible(ab3100_data, AB3100_AL0, buf, 4); if (err) return err; fat_time = ((u64) buf[3] << 40) | ((u64) buf[2] << 32) | ((u64) buf[1] << 24) | ((u64) buf[0] << 16); time = (unsigned long) (fat_time / (u64) (AB3100_RTC_CLOCK_RATE * 2)); rtc_time_to_tm(time, &alarm->time); return rtc_valid_tm(&alarm->time); } static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct ab3100 *ab3100_data = dev_get_drvdata(dev); u8 regs[] = {AB3100_AL0, AB3100_AL1, AB3100_AL2, AB3100_AL3}; unsigned char buf[4]; unsigned long secs; u64 fat_time; int err; int i; 
rtc_tm_to_time(&alarm->time, &secs); fat_time = (u64) secs * AB3100_RTC_CLOCK_RATE * 2; buf[0] = (fat_time >> 16) & 0xFF; buf[1] = (fat_time >> 24) & 0xFF; buf[2] = (fat_time >> 32) & 0xFF; buf[3] = (fat_time >> 40) & 0xFF; /* Set the alarm */ for (i = 0; i < 4; i++) { err = ab3100_set_register_interruptible(ab3100_data, regs[i], buf[i]); if (err) return err; } /* Then enable the alarm */ return ab3100_mask_and_set_register_interruptible(ab3100_data, AB3100_RTC, ~(1 << 2), alarm->enabled << 2); } static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled) { struct ab3100 *ab3100_data = dev_get_drvdata(dev); /* * It's not possible to enable/disable the alarm IRQ for this RTC. * It does not actually trigger any IRQ: instead its only function is * to power up the system, if it wasn't on. This will manifest as * a "power up cause" in the AB3100 power driver (battery charging etc) * and need to be handled there instead. */ if (enabled) return ab3100_mask_and_set_register_interruptible(ab3100_data, AB3100_RTC, ~(1 << 2), 1 << 2); else return ab3100_mask_and_set_register_interruptible(ab3100_data, AB3100_RTC, ~(1 << 2), 0); } static const struct rtc_class_ops ab3100_rtc_ops = { .read_time = ab3100_rtc_read_time, .set_mmss = ab3100_rtc_set_mmss, .read_alarm = ab3100_rtc_read_alarm, .set_alarm = ab3100_rtc_set_alarm, .alarm_irq_enable = ab3100_rtc_irq_enable, }; static int __init ab3100_rtc_probe(struct platform_device *pdev) { int err; u8 regval; struct rtc_device *rtc; struct ab3100 *ab3100_data = platform_get_drvdata(pdev); /* The first RTC register needs special treatment */ err = ab3100_get_register_interruptible(ab3100_data, AB3100_RTC, &regval); if (err) { dev_err(&pdev->dev, "unable to read RTC register\n"); return -ENODEV; } if ((regval & 0xFE) != RTC_SETTING) { dev_warn(&pdev->dev, "not default value in RTC reg 0x%x\n", regval); } if ((regval & 1) == 0) { /* * Set bit to detect power loss. * This bit remains until RTC power is lost. 
*/ regval = 1 | RTC_SETTING; err = ab3100_set_register_interruptible(ab3100_data, AB3100_RTC, regval); /* Ignore any error on this write */ } rtc = rtc_device_register("ab3100-rtc", &pdev->dev, &ab3100_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { err = PTR_ERR(rtc); return err; } return 0; } static int __exit ab3100_rtc_remove(struct platform_device *pdev) { struct rtc_device *rtc = platform_get_drvdata(pdev); rtc_device_unregister(rtc); return 0; } static struct platform_driver ab3100_rtc_driver = { .driver = { .name = "ab3100-rtc", .owner = THIS_MODULE, }, .remove = __exit_p(ab3100_rtc_remove), }; static int __init ab3100_rtc_init(void) { return platform_driver_probe(&ab3100_rtc_driver, ab3100_rtc_probe); } static void __exit ab3100_rtc_exit(void) { platform_driver_unregister(&ab3100_rtc_driver); } module_init(ab3100_rtc_init); module_exit(ab3100_rtc_exit); MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); MODULE_DESCRIPTION("AB3100 RTC Driver"); MODULE_LICENSE("GPL");
gpl-2.0
oSaiYa/linux-sh4
init/do_mounts_rd.c
606
8166
#include <linux/kernel.h> #include <linux/fs.h> #include <linux/minix_fs.h> #include <linux/ext2_fs.h> #include <linux/romfs_fs.h> #include <linux/cramfs_fs.h> #include <linux/initrd.h> #include <linux/string.h> #include "do_mounts.h" #include "../fs/squashfs/squashfs_fs.h" #include <linux/decompress/generic.h> int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */ static int __init prompt_ramdisk(char *str) { rd_prompt = simple_strtol(str,NULL,0) & 1; return 1; } __setup("prompt_ramdisk=", prompt_ramdisk); int __initdata rd_image_start; /* starting block # of image */ static int __init ramdisk_start_setup(char *str) { rd_image_start = simple_strtol(str,NULL,0); return 1; } __setup("ramdisk_start=", ramdisk_start_setup); static int __init crd_load(int in_fd, int out_fd, decompress_fn deco); /* * This routine tries to find a RAM disk image to load, and returns the * number of blocks to read for a non-compressed image, 0 if the image * is a compressed image, and -1 if an image with the right magic * numbers could not be found. 
* * We currently check for the following magic numbers: * minix * ext2 * romfs * cramfs * squashfs * gzip */ static int __init identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor) { const int size = 512; struct minix_super_block *minixsb; struct ext2_super_block *ext2sb; struct romfs_super_block *romfsb; struct cramfs_super *cramfsb; struct squashfs_super_block *squashfsb; int nblocks = -1; unsigned char *buf; const char *compress_name; buf = kmalloc(size, GFP_KERNEL); if (!buf) return -1; minixsb = (struct minix_super_block *) buf; ext2sb = (struct ext2_super_block *) buf; romfsb = (struct romfs_super_block *) buf; cramfsb = (struct cramfs_super *) buf; squashfsb = (struct squashfs_super_block *) buf; memset(buf, 0xe5, size); /* * Read block 0 to test for compressed kernel */ sys_lseek(fd, start_block * BLOCK_SIZE, 0); sys_read(fd, buf, size); *decompressor = decompress_method(buf, size, &compress_name); if (compress_name) { printk(KERN_NOTICE "RAMDISK: %s image found at block %d\n", compress_name, start_block); if (!*decompressor) printk(KERN_EMERG "RAMDISK: %s decompressor not configured!\n", compress_name); nblocks = 0; goto done; } /* romfs is at block zero too */ if (romfsb->word0 == ROMSB_WORD0 && romfsb->word1 == ROMSB_WORD1) { printk(KERN_NOTICE "RAMDISK: romfs filesystem found at block %d\n", start_block); nblocks = (ntohl(romfsb->size)+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS; goto done; } if (cramfsb->magic == CRAMFS_MAGIC) { printk(KERN_NOTICE "RAMDISK: cramfs filesystem found at block %d\n", start_block); nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS; goto done; } /* squashfs is at block zero too */ if (le32_to_cpu(squashfsb->s_magic) == SQUASHFS_MAGIC) { printk(KERN_NOTICE "RAMDISK: squashfs filesystem found at block %d\n", start_block); nblocks = (le64_to_cpu(squashfsb->bytes_used) + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS; goto done; } /* * Read block 1 to test for minix and ext2 superblock */ sys_lseek(fd, (start_block+1) 
* BLOCK_SIZE, 0); sys_read(fd, buf, size); /* Try minix */ if (minixsb->s_magic == MINIX_SUPER_MAGIC || minixsb->s_magic == MINIX_SUPER_MAGIC2) { printk(KERN_NOTICE "RAMDISK: Minix filesystem found at block %d\n", start_block); nblocks = minixsb->s_nzones << minixsb->s_log_zone_size; goto done; } /* Try ext2 */ if (ext2sb->s_magic == cpu_to_le16(EXT2_SUPER_MAGIC)) { printk(KERN_NOTICE "RAMDISK: ext2 filesystem found at block %d\n", start_block); nblocks = le32_to_cpu(ext2sb->s_blocks_count) << le32_to_cpu(ext2sb->s_log_block_size); goto done; } printk(KERN_NOTICE "RAMDISK: Couldn't find valid RAM disk image starting at %d.\n", start_block); done: sys_lseek(fd, start_block * BLOCK_SIZE, 0); kfree(buf); return nblocks; } int __init rd_load_image(char *from) { int res = 0; int in_fd, out_fd; unsigned long rd_blocks, devblocks; int nblocks, i, disk; char *buf = NULL; unsigned short rotate = 0; decompress_fn decompressor = NULL; #if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES) char rotator[4] = { '|' , '/' , '-' , '\\' }; #endif out_fd = sys_open("/dev/ram", O_RDWR, 0); if (out_fd < 0) goto out; in_fd = sys_open(from, O_RDONLY, 0); if (in_fd < 0) goto noclose_input; nblocks = identify_ramdisk_image(in_fd, rd_image_start, &decompressor); if (nblocks < 0) goto done; if (nblocks == 0) { if (crd_load(in_fd, out_fd, decompressor) == 0) goto successful_load; goto done; } /* * NOTE NOTE: nblocks is not actually blocks but * the number of kibibytes of data to load into a ramdisk. * So any ramdisk block size that is a multiple of 1KiB should * work when the appropriate ramdisk_blocksize is specified * on the command line. * * The default ramdisk_blocksize is 1KiB and it is generally * silly to use anything else, so make sure to use 1KiB * blocksize while generating ext2fs ramdisk-images. */ if (sys_ioctl(out_fd, BLKGETSIZE, (unsigned long)&rd_blocks) < 0) rd_blocks = 0; else rd_blocks >>= 1; if (nblocks > rd_blocks) { printk("RAMDISK: image too big! 
(%dKiB/%ldKiB)\n", nblocks, rd_blocks); goto done; } /* * OK, time to copy in the data */ if (sys_ioctl(in_fd, BLKGETSIZE, (unsigned long)&devblocks) < 0) devblocks = 0; else devblocks >>= 1; if (strcmp(from, "/initrd.image") == 0) devblocks = nblocks; if (devblocks == 0) { printk(KERN_ERR "RAMDISK: could not determine device size\n"); goto done; } buf = kmalloc(BLOCK_SIZE, GFP_KERNEL); if (!buf) { printk(KERN_ERR "RAMDISK: could not allocate buffer\n"); goto done; } printk(KERN_NOTICE "RAMDISK: Loading %dKiB [%ld disk%s] into ram disk... ", nblocks, ((nblocks-1)/devblocks)+1, nblocks>devblocks ? "s" : ""); for (i = 0, disk = 1; i < nblocks; i++) { if (i && (i % devblocks == 0)) { printk("done disk #%d.\n", disk++); rotate = 0; if (sys_close(in_fd)) { printk("Error closing the disk.\n"); goto noclose_input; } change_floppy("disk #%d", disk); in_fd = sys_open(from, O_RDONLY, 0); if (in_fd < 0) { printk("Error opening disk.\n"); goto noclose_input; } printk("Loading disk #%d... ", disk); } sys_read(in_fd, buf, BLOCK_SIZE); sys_write(out_fd, buf, BLOCK_SIZE); #if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES) if (!(i % 16)) { printk("%c\b", rotator[rotate & 0x3]); rotate++; } #endif } printk("done.\n"); successful_load: res = 1; done: sys_close(in_fd); noclose_input: sys_close(out_fd); out: kfree(buf); sys_unlink("/dev/ram"); return res; } int __init rd_load_disk(int n) { if (rd_prompt) change_floppy("root floppy disk to be loaded into RAM disk"); create_dev("/dev/root", ROOT_DEV); create_dev("/dev/ram", MKDEV(RAMDISK_MAJOR, n)); return rd_load_image("/dev/root"); } static int exit_code; static int decompress_error; static int crd_infd, crd_outfd; static int __init compr_fill(void *buf, unsigned int len) { int r = sys_read(crd_infd, buf, len); if (r < 0) printk(KERN_ERR "RAMDISK: error while reading compressed data"); else if (r == 0) printk(KERN_ERR "RAMDISK: EOF while reading compressed data"); return r; } static int __init compr_flush(void *window, unsigned 
int outcnt) { int written = sys_write(crd_outfd, window, outcnt); if (written != outcnt) { if (decompress_error == 0) printk(KERN_ERR "RAMDISK: incomplete write (%d != %d)\n", written, outcnt); decompress_error = 1; return -1; } return outcnt; } static void __init error(char *x) { printk(KERN_ERR "%s\n", x); exit_code = 1; decompress_error = 1; } static int __init crd_load(int in_fd, int out_fd, decompress_fn deco) { int result; crd_infd = in_fd; crd_outfd = out_fd; result = deco(NULL, 0, compr_fill, compr_flush, NULL, NULL, error); if (decompress_error) result = 1; return result; }
gpl-2.0
silver-alx/ac100_kernel
sound/pci/cs5530.c
606
7253
/* * cs5530.c - Initialisation code for Cyrix/NatSemi VSA1 softaudio * * (C) Copyright 2007 Ash Willis <ashwillis@programmer.net> * (C) Copyright 2003 Red Hat Inc <alan@lxorguk.ukuu.org.uk> * * This driver was ported (shamelessly ripped ;) from oss/kahlua.c but I did * mess with it a bit. The chip seems to have to have trouble with full duplex * mode. If we're recording in 8bit 8000kHz, say, and we then attempt to * simultaneously play back audio at 16bit 44100kHz, the device actually plays * back in the same format in which it is capturing. By forcing the chip to * always play/capture in 16/44100, we can let alsa-lib convert the samples and * that way we can hack up some full duplex audio. * * XpressAudio(tm) is used on the Cyrix MediaGX (now NatSemi Geode) systems. * The older version (VSA1) provides fairly good soundblaster emulation * although there are a couple of bugs: large DMA buffers break record, * and the MPU event handling seems suspect. VSA2 allows the native driver * to control the AC97 audio engine directly and requires a different driver. * * Thanks to National Semiconductor for providing the needed information * on the XpressAudio(tm) internals. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * TO DO: * Investigate whether we can portably support Cognac (5520) in the * same manner. 
*/ #include <linux/delay.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <sound/core.h> #include <sound/sb.h> #include <sound/initval.h> MODULE_AUTHOR("Ash Willis"); MODULE_DESCRIPTION("CS5530 Audio"); MODULE_LICENSE("GPL"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; struct snd_cs5530 { struct snd_card *card; struct pci_dev *pci; struct snd_sb *sb; unsigned long pci_base; }; static struct pci_device_id snd_cs5530_ids[] = { {PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_AUDIO, PCI_ANY_ID, PCI_ANY_ID, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, snd_cs5530_ids); static int snd_cs5530_free(struct snd_cs5530 *chip) { pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_cs5530_dev_free(struct snd_device *device) { struct snd_cs5530 *chip = device->device_data; return snd_cs5530_free(chip); } static void __devexit snd_cs5530_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static u8 __devinit snd_cs5530_mixer_read(unsigned long io, u8 reg) { outb(reg, io + 4); udelay(20); reg = inb(io + 5); udelay(20); return reg; } static int __devinit snd_cs5530_create(struct snd_card *card, struct pci_dev *pci, struct snd_cs5530 **rchip) { struct snd_cs5530 *chip; unsigned long sb_base; u8 irq, dma8, dma16 = 0; u16 map; void __iomem *mem; int err; static struct snd_device_ops ops = { .dev_free = snd_cs5530_dev_free, }; *rchip = NULL; err = pci_enable_device(pci); if (err < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->card = card; chip->pci = pci; err = pci_request_regions(pci, "CS5530"); if (err < 0) { kfree(chip); pci_disable_device(pci); return err; } chip->pci_base = pci_resource_start(pci, 0); mem = pci_ioremap_bar(pci, 0); if (mem == NULL) { kfree(chip); 
pci_disable_device(pci); return -EBUSY; } map = readw(mem + 0x18); iounmap(mem); /* Map bits 0:1 * 0x20 + 0x200 = sb base 2 sb enable 3 adlib enable 5 MPU enable 0x330 6 MPU enable 0x300 The other bits may be used internally so must be masked */ sb_base = 0x220 + 0x20 * (map & 3); if (map & (1<<2)) printk(KERN_INFO "CS5530: XpressAudio at 0x%lx\n", sb_base); else { printk(KERN_ERR "Could not find XpressAudio!\n"); snd_cs5530_free(chip); return -ENODEV; } if (map & (1<<5)) printk(KERN_INFO "CS5530: MPU at 0x300\n"); else if (map & (1<<6)) printk(KERN_INFO "CS5530: MPU at 0x330\n"); irq = snd_cs5530_mixer_read(sb_base, 0x80) & 0x0F; dma8 = snd_cs5530_mixer_read(sb_base, 0x81); if (dma8 & 0x20) dma16 = 5; else if (dma8 & 0x40) dma16 = 6; else if (dma8 & 0x80) dma16 = 7; else { printk(KERN_ERR "CS5530: No 16bit DMA enabled\n"); snd_cs5530_free(chip); return -ENODEV; } if (dma8 & 0x01) dma8 = 0; else if (dma8 & 02) dma8 = 1; else if (dma8 & 0x08) dma8 = 3; else { printk(KERN_ERR "CS5530: No 8bit DMA enabled\n"); snd_cs5530_free(chip); return -ENODEV; } if (irq & 1) irq = 9; else if (irq & 2) irq = 5; else if (irq & 4) irq = 7; else if (irq & 8) irq = 10; else { printk(KERN_ERR "CS5530: SoundBlaster IRQ not set\n"); snd_cs5530_free(chip); return -ENODEV; } printk(KERN_INFO "CS5530: IRQ: %d DMA8: %d DMA16: %d\n", irq, dma8, dma16); err = snd_sbdsp_create(card, sb_base, irq, snd_sb16dsp_interrupt, dma8, dma16, SB_HW_CS5530, &chip->sb); if (err < 0) { printk(KERN_ERR "CS5530: Could not create SoundBlaster\n"); snd_cs5530_free(chip); return err; } err = snd_sb16dsp_pcm(chip->sb, 0, &chip->sb->pcm); if (err < 0) { printk(KERN_ERR "CS5530: Could not create PCM\n"); snd_cs5530_free(chip); return err; } err = snd_sbmixer_new(chip->sb); if (err < 0) { printk(KERN_ERR "CS5530: Could not create Mixer\n"); snd_cs5530_free(chip); return err; } err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { snd_cs5530_free(chip); return err; } snd_card_set_dev(card, 
&pci->dev); *rchip = chip; return 0; } static int __devinit snd_cs5530_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_cs5530 *chip = NULL; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; err = snd_cs5530_create(card, pci, &chip); if (err < 0) { snd_card_free(card); return err; } strcpy(card->driver, "CS5530"); strcpy(card->shortname, "CS5530 Audio"); sprintf(card->longname, "%s at 0x%lx", card->shortname, chip->pci_base); err = snd_card_register(card); if (err < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static struct pci_driver driver = { .name = "CS5530_Audio", .id_table = snd_cs5530_ids, .probe = snd_cs5530_probe, .remove = __devexit_p(snd_cs5530_remove), }; static int __init alsa_card_cs5530_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_cs5530_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_cs5530_init) module_exit(alsa_card_cs5530_exit)
gpl-2.0
siskin/bluetooth-next
drivers/iio/pressure/st_pressure_i2c.c
606
2088
/* * STMicroelectronics pressures driver * * Copyright 2013 STMicroelectronics Inc. * * Denis Ciocca <denis.ciocca@st.com> * * Licensed under the GPL-2. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/iio/iio.h> #include <linux/iio/common/st_sensors.h> #include <linux/iio/common/st_sensors_i2c.h> #include "st_pressure.h" #ifdef CONFIG_OF static const struct of_device_id st_press_of_match[] = { { .compatible = "st,lps001wp-press", .data = LPS001WP_PRESS_DEV_NAME, }, { .compatible = "st,lps25h-press", .data = LPS25H_PRESS_DEV_NAME, }, { .compatible = "st,lps331ap-press", .data = LPS331AP_PRESS_DEV_NAME, }, {}, }; MODULE_DEVICE_TABLE(of, st_press_of_match); #else #define st_press_of_match NULL #endif static int st_press_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct iio_dev *indio_dev; struct st_sensor_data *press_data; int err; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*press_data)); if (!indio_dev) return -ENOMEM; press_data = iio_priv(indio_dev); st_sensors_of_i2c_probe(client, st_press_of_match); st_sensors_i2c_configure(indio_dev, client, press_data); err = st_press_common_probe(indio_dev); if (err < 0) return err; return 0; } static int st_press_i2c_remove(struct i2c_client *client) { st_press_common_remove(i2c_get_clientdata(client)); return 0; } static const struct i2c_device_id st_press_id_table[] = { { LPS001WP_PRESS_DEV_NAME }, { LPS25H_PRESS_DEV_NAME }, { LPS331AP_PRESS_DEV_NAME }, {}, }; MODULE_DEVICE_TABLE(i2c, st_press_id_table); static struct i2c_driver st_press_driver = { .driver = { .owner = THIS_MODULE, .name = "st-press-i2c", .of_match_table = of_match_ptr(st_press_of_match), }, .probe = st_press_i2c_probe, .remove = st_press_i2c_remove, .id_table = st_press_id_table, }; module_i2c_driver(st_press_driver); MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>"); MODULE_DESCRIPTION("STMicroelectronics pressures i2c driver"); 
MODULE_LICENSE("GPL v2");
gpl-2.0
viaembedded/arm-soc
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
606
12645
/* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <asm-generic/kmap_types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" enum { MLX5_PAGES_CANT_GIVE = 0, MLX5_PAGES_GIVE = 1, MLX5_PAGES_TAKE = 2 }; enum { MLX5_BOOT_PAGES = 1, MLX5_INIT_PAGES = 2, MLX5_POST_INIT_PAGES = 3 }; struct mlx5_pages_req { struct mlx5_core_dev *dev; u16 func_id; s32 npages; struct work_struct work; }; struct fw_page { struct rb_node rb_node; u64 addr; struct page *page; u16 func_id; unsigned long bitmask; struct list_head list; unsigned free_count; }; struct mlx5_query_pages_inbox { struct mlx5_inbox_hdr hdr; u8 rsvd[8]; }; struct mlx5_query_pages_outbox { struct mlx5_outbox_hdr hdr; __be16 rsvd; __be16 func_id; __be32 num_pages; }; struct mlx5_manage_pages_inbox { struct mlx5_inbox_hdr hdr; __be16 rsvd; __be16 func_id; __be32 num_entries; __be64 pas[0]; }; struct mlx5_manage_pages_outbox { struct mlx5_outbox_hdr hdr; __be32 num_entries; u8 rsvd[4]; __be64 pas[0]; }; enum { MAX_RECLAIM_TIME_MSECS = 5000, }; enum { MLX5_MAX_RECLAIM_TIME_MILI = 5000, MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, }; static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) { struct rb_root *root = &dev->priv.page_root; struct rb_node **new = &root->rb_node; struct rb_node *parent = NULL; struct fw_page *nfp; struct fw_page *tfp; int i; while (*new) { parent = *new; tfp = rb_entry(parent, struct fw_page, rb_node); if (tfp->addr < addr) new = &parent->rb_left; else if (tfp->addr > addr) new = &parent->rb_right; else return -EEXIST; } nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); if (!nfp) return -ENOMEM; nfp->addr = addr; nfp->page = page; nfp->func_id = func_id; nfp->free_count = MLX5_NUM_4K_IN_PAGE; for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++) set_bit(i, &nfp->bitmask); rb_link_node(&nfp->rb_node, parent, new); rb_insert_color(&nfp->rb_node, root); list_add(&nfp->list, &dev->priv.free_list); return 0; } static struct 
fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) { struct rb_root *root = &dev->priv.page_root; struct rb_node *tmp = root->rb_node; struct fw_page *result = NULL; struct fw_page *tfp; while (tmp) { tfp = rb_entry(tmp, struct fw_page, rb_node); if (tfp->addr < addr) { tmp = tmp->rb_left; } else if (tfp->addr > addr) { tmp = tmp->rb_right; } else { result = tfp; break; } } return result; } static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot) { struct mlx5_query_pages_inbox in; struct mlx5_query_pages_outbox out; int err; memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); if (err) return err; if (out.hdr.status) return mlx5_cmd_status_to_err(&out.hdr); *npages = be32_to_cpu(out.num_pages); *func_id = be16_to_cpu(out.func_id); return err; } static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) { struct fw_page *fp; unsigned n; if (list_empty(&dev->priv.free_list)) return -ENOMEM; fp = list_entry(dev->priv.free_list.next, struct fw_page, list); n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); if (n >= MLX5_NUM_4K_IN_PAGE) { mlx5_core_warn(dev, "alloc 4k bug\n"); return -ENOENT; } clear_bit(n, &fp->bitmask); fp->free_count--; if (!fp->free_count) list_del(&fp->list); *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE; return 0; } #define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT) static void free_4k(struct mlx5_core_dev *dev, u64 addr) { struct fw_page *fwp; int n; fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK); if (!fwp) { mlx5_core_warn(dev, "page not found\n"); return; } n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; fwp->free_count++; set_bit(n, &fwp->bitmask); if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { rb_erase(&fwp->rb_node, &dev->priv.page_root); if (fwp->free_count 
!= 1) list_del(&fwp->list); dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK, PAGE_SIZE, DMA_BIDIRECTIONAL); __free_page(fwp->page); kfree(fwp); } else if (fwp->free_count == 1) { list_add(&fwp->list, &dev->priv.free_list); } } static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) { struct page *page; u64 addr; int err; int nid = dev_to_node(&dev->pdev->dev); page = alloc_pages_node(nid, GFP_HIGHUSER, 0); if (!page) { mlx5_core_warn(dev, "failed to allocate page\n"); return -ENOMEM; } addr = dma_map_page(&dev->pdev->dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(&dev->pdev->dev, addr)) { mlx5_core_warn(dev, "failed dma mapping page\n"); err = -ENOMEM; goto out_alloc; } err = insert_page(dev, addr, page, func_id); if (err) { mlx5_core_err(dev, "failed to track allocated page\n"); goto out_mapping; } return 0; out_mapping: dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); out_alloc: __free_page(page); return err; } static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int notify_fail) { struct mlx5_manage_pages_inbox *in; struct mlx5_manage_pages_outbox out; struct mlx5_manage_pages_inbox *nin; int inlen; u64 addr; int err; int i; inlen = sizeof(*in) + npages * sizeof(in->pas[0]); in = mlx5_vzalloc(inlen); if (!in) { mlx5_core_warn(dev, "vzalloc failed %d\n", inlen); return -ENOMEM; } memset(&out, 0, sizeof(out)); for (i = 0; i < npages; i++) { retry: err = alloc_4k(dev, &addr); if (err) { if (err == -ENOMEM) err = alloc_system_page(dev, func_id); if (err) goto out_4k; goto retry; } in->pas[i] = cpu_to_be64(addr); } in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); in->func_id = cpu_to_be16(func_id); in->num_entries = cpu_to_be32(npages); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); if (err) { mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); goto out_alloc; } dev->priv.fw_pages += npages; 
if (out.hdr.status) { err = mlx5_cmd_status_to_err(&out.hdr); if (err) { mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status); goto out_alloc; } } mlx5_core_dbg(dev, "err %d\n", err); goto out_free; out_alloc: if (notify_fail) { nin = kzalloc(sizeof(*nin), GFP_KERNEL); if (!nin) { mlx5_core_warn(dev, "allocation failed\n"); goto out_4k; } memset(&out, 0, sizeof(out)); nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE); if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out))) mlx5_core_warn(dev, "page notify failed\n"); kfree(nin); } out_4k: for (i--; i >= 0; i--) free_4k(dev, be64_to_cpu(in->pas[i])); out_free: kvfree(in); return err; } static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, int *nclaimed) { struct mlx5_manage_pages_inbox in; struct mlx5_manage_pages_outbox *out; int num_claimed; int outlen; u64 addr; int err; int i; if (nclaimed) *nclaimed = 0; memset(&in, 0, sizeof(in)); outlen = sizeof(*out) + npages * sizeof(out->pas[0]); out = mlx5_vzalloc(outlen); if (!out) return -ENOMEM; in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); in.func_id = cpu_to_be16(func_id); in.num_entries = cpu_to_be32(npages); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); if (err) { mlx5_core_err(dev, "failed reclaiming pages\n"); goto out_free; } dev->priv.fw_pages -= npages; if (out->hdr.status) { err = mlx5_cmd_status_to_err(&out->hdr); goto out_free; } num_claimed = be32_to_cpu(out->num_entries); if (nclaimed) *nclaimed = num_claimed; for (i = 0; i < num_claimed; i++) { addr = be64_to_cpu(out->pas[i]); free_4k(dev, addr); } out_free: kvfree(out); return err; } static void pages_work_handler(struct work_struct *work) { struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work); struct mlx5_core_dev *dev = req->dev; 
int err = 0; if (req->npages < 0) err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); else if (req->npages > 0) err = give_pages(dev, req->func_id, req->npages, 1); if (err) mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? "reclaim" : "give", err); kfree(req); } void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages) { struct mlx5_pages_req *req; req = kzalloc(sizeof(*req), GFP_ATOMIC); if (!req) { mlx5_core_warn(dev, "failed to allocate pages request\n"); return; } req->dev = dev; req->func_id = func_id; req->npages = npages; INIT_WORK(&req->work, pages_work_handler); queue_work(dev->priv.pg_wq, &req->work); } int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) { u16 uninitialized_var(func_id); s32 uninitialized_var(npages); int err; err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); if (err) return err; mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", npages, boot ? "boot" : "init", func_id); return give_pages(dev, func_id, npages, 0); } enum { MLX5_BLKS_FOR_RECLAIM_PAGES = 12 }; static int optimal_reclaimed_pages(void) { struct mlx5_cmd_prot_block *block; struct mlx5_cmd_layout *lay; int ret; ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) - sizeof(struct mlx5_manage_pages_outbox)) / FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]); return ret; } int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) { unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); struct fw_page *fwp; struct rb_node *p; int nclaimed = 0; int err; do { p = rb_first(&dev->priv.page_root); if (p) { fwp = rb_entry(p, struct fw_page, rb_node); err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), &nclaimed); if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); return err; } if (nclaimed) end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); } if (time_after(jiffies, end)) { mlx5_core_warn(dev, "FW did not return all pages. 
giving up...\n"); break; } } while (p); return 0; } void mlx5_pagealloc_init(struct mlx5_core_dev *dev) { dev->priv.page_root = RB_ROOT; INIT_LIST_HEAD(&dev->priv.free_list); } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) { /* nothing */ } int mlx5_pagealloc_start(struct mlx5_core_dev *dev) { dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); if (!dev->priv.pg_wq) return -ENOMEM; return 0; } void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) { destroy_workqueue(dev->priv.pg_wq); }
gpl-2.0
temasek/SGS3-Sourcedrops
drivers/scsi/megaraid/megaraid_sas_base.c
862
142744
/* * Linux MegaRAID driver for SAS based RAID controllers * * Copyright (c) 2009-2011 LSI Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c * Version : v00.00.05.38-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote * Sumant Patro * Bo Yang * Adam Radford <linuxraid@lsi.com> * * Send feedback to: <megaraidlinux@lsi.com> * * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 * ATTN: Linuxraid */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/list.h> #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/poll.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "megaraid_sas_fusion.h" #include "megaraid_sas.h" /* * poll_mode_io:1- schedule complete completion from q cmd */ static unsigned int poll_mode_io; module_param_named(poll_mode_io, poll_mode_io, int, 0); MODULE_PARM_DESC(poll_mode_io, "Complete cmds from IO path, (default=0)"); /* * Number of sectors per IO command * Will be set in 
megasas_init_mfi if user does not provide */ static unsigned int max_sectors; module_param_named(max_sectors, max_sectors, int, 0); MODULE_PARM_DESC(max_sectors, "Maximum number of sectors per IO command"); static int msix_disable; module_param(msix_disable, int, S_IRUGO); MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("megaraidlinux@lsi.com"); MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); int megasas_transition_to_ready(struct megasas_instance *instance); static int megasas_get_pd_list(struct megasas_instance *instance); static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); /* * PCI ID table for all supported controllers */ static struct pci_device_id megasas_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)}, /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)}, /* ppc IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)}, /* gen2*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)}, /* skinny*/ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)}, /* xscale IOP, vega */ {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, /* Fusion */ {} }; MODULE_DEVICE_TABLE(pci, megasas_pci_table); static int megasas_mgmt_majorno; static struct megasas_mgmt_info megasas_mgmt_info; static struct fasync_struct *megasas_async_queue; static DEFINE_MUTEX(megasas_async_queue_mutex); static int megasas_poll_wait_aen; 
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); static u32 support_poll_for_event; u32 megasas_dbg_lvl; static u32 support_device_change; /* define lock for aen poll */ spinlock_t poll_aen_lock; void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); static u32 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs); static int megasas_adp_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *reg_set); static irqreturn_t megasas_isr(int irq, void *devp); static u32 megasas_init_adapter_mfi(struct megasas_instance *instance); u32 megasas_build_and_issue_cmd(struct megasas_instance *instance, struct scsi_cmnd *scmd); static void megasas_complete_cmd_dpc(unsigned long instance_addr); void megasas_release_fusion(struct megasas_instance *instance); int megasas_ioc_init_fusion(struct megasas_instance *instance); void megasas_free_cmds_fusion(struct megasas_instance *instance); u8 megasas_get_map_info(struct megasas_instance *instance); int megasas_sync_map_info(struct megasas_instance *instance); int wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); void megasas_reset_reply_desc(struct megasas_instance *instance); u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); int megasas_reset_fusion(struct Scsi_Host *shost); void megasas_fusion_ocr_wq(struct work_struct *work); void megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); } /** * megasas_get_cmd - Get a command from the free pool * @instance: Adapter soft state * * Returns a free command from the pool */ struct megasas_cmd *megasas_get_cmd(struct megasas_instance *instance) { unsigned long flags; struct megasas_cmd *cmd = NULL; spin_lock_irqsave(&instance->cmd_pool_lock, flags); if (!list_empty(&instance->cmd_pool)) { cmd = 
list_entry((&instance->cmd_pool)->next, struct megasas_cmd, list); list_del_init(&cmd->list); } else { printk(KERN_ERR "megasas: Command pool empty!\n"); } spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); return cmd; } /** * megasas_return_cmd - Return a cmd to free command pool * @instance: Adapter soft state * @cmd: Command packet to be returned to free command pool */ inline void megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { unsigned long flags; spin_lock_irqsave(&instance->cmd_pool_lock, flags); cmd->scmd = NULL; cmd->frame_count = 0; list_add_tail(&cmd->list, &instance->cmd_pool); spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); } /** * The following functions are defined for xscale * (deviceid : 1064R, PERC5) controllers */ /** * megasas_enable_intr_xscale - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs) { writel(0, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_xscale -Disables interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_xscale(struct megasas_register_set __iomem * regs) { u32 mask = 0x1f; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_xscale - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs) { return readl(&(regs)->outbound_msg_0); } /** * megasas_clear_interrupt_xscale - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) { u32 status; u32 mfiStatus = 0; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (status & MFI_OB_INTR_STATUS_MASK) mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; if 
(status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT) mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; /* * Clear the interrupt by writing back the same value */ if (mfiStatus) writel(status, &regs->outbound_intr_status); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_status); return mfiStatus; } /** * megasas_fire_cmd_xscale - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_xscale(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr >> 3)|(frame_count), &(regs)->inbound_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_adp_reset_xscale - For controller reset * @regs: MFI register set */ static int megasas_adp_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { u32 i; u32 pcidata; writel(MFI_ADP_RESET, &regs->inbound_doorbell); for (i = 0; i < 3; i++) msleep(1000); /* sleep for 3 secs */ pcidata = 0; pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata); printk(KERN_NOTICE "pcidata = %x\n", pcidata); if (pcidata & 0x2) { printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata); pcidata &= ~0x2; pci_write_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, pcidata); for (i = 0; i < 2; i++) msleep(1000); /* need to wait 2 secs again */ pcidata = 0; pci_read_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata); printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata); if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) { printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata); pcidata = 0; pci_write_config_dword(instance->pdev, MFI_1068_FW_HANDSHAKE_OFFSET, pcidata); } } return 0; } /** * megasas_check_reset_xscale - For controller reset check * @regs: MFI 
register set */ static int megasas_check_reset_xscale(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { u32 consumer; consumer = *instance->consumer; if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) { return 1; } return 0; } static struct megasas_instance_template megasas_instance_template_xscale = { .fire_cmd = megasas_fire_cmd_xscale, .enable_intr = megasas_enable_intr_xscale, .disable_intr = megasas_disable_intr_xscale, .clear_intr = megasas_clear_intr_xscale, .read_fw_status_reg = megasas_read_fw_status_reg_xscale, .adp_reset = megasas_adp_reset_xscale, .check_reset = megasas_check_reset_xscale, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /** * This is the end of set of functions & definitions specific * to xscale (deviceid : 1064R, PERC5) controllers */ /** * The following functions are defined for ppc (deviceid : 0x60) * controllers */ /** * megasas_enable_intr_ppc - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs) { writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); writel(~0x80000000, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_ppc - Disable interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_ppc(struct megasas_register_set __iomem * regs) { u32 mask = 0xFFFFFFFF; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_ppc - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs) { return readl(&(regs)->outbound_scratch_pad); } /** * 
megasas_clear_interrupt_ppc - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) { u32 status, mfiStatus = 0; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT) mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; /* * Clear the interrupt by writing back the same value */ writel(status, &regs->outbound_doorbell_clear); /* Dummy readl to force pci flush */ readl(&regs->outbound_doorbell_clear); return mfiStatus; } /** * megasas_fire_cmd_ppc - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_ppc(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_check_reset_ppc - For controller reset check * @regs: MFI register set */ static int megasas_check_reset_ppc(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) return 1; return 0; } static struct megasas_instance_template megasas_instance_template_ppc = { .fire_cmd = megasas_fire_cmd_ppc, .enable_intr = megasas_enable_intr_ppc, .disable_intr = megasas_disable_intr_ppc, .clear_intr = megasas_clear_intr_ppc, .read_fw_status_reg = megasas_read_fw_status_reg_ppc, .adp_reset = megasas_adp_reset_xscale, .check_reset = megasas_check_reset_ppc, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = 
megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /** * megasas_enable_intr_skinny - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs) { writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_skinny - Disables interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs) { u32 mask = 0xFFFFFFFF; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_skinny - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs) { return readl(&(regs)->outbound_scratch_pad); } /** * megasas_clear_interrupt_skinny - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) { u32 status; u32 mfiStatus = 0; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) { return 0; } /* * Check if it is our interrupt */ if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) == MFI_STATE_FAULT) { mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; } else mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; /* * Clear the interrupt by writing back the same value */ writel(status, &regs->outbound_intr_status); /* * dummy read to flush PCI */ readl(&regs->outbound_intr_status); return mfiStatus; } /** * megasas_fire_cmd_skinny - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_skinny(struct megasas_instance 
*instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel(0, &(regs)->inbound_high_queue_port); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_low_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_check_reset_skinny - For controller reset check * @regs: MFI register set */ static int megasas_check_reset_skinny(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) return 1; return 0; } static struct megasas_instance_template megasas_instance_template_skinny = { .fire_cmd = megasas_fire_cmd_skinny, .enable_intr = megasas_enable_intr_skinny, .disable_intr = megasas_disable_intr_skinny, .clear_intr = megasas_clear_intr_skinny, .read_fw_status_reg = megasas_read_fw_status_reg_skinny, .adp_reset = megasas_adp_reset_gen2, .check_reset = megasas_check_reset_skinny, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /** * The following functions are defined for gen2 (deviceid : 0x78 0x79) * controllers */ /** * megasas_enable_intr_gen2 - Enables interrupts * @regs: MFI register set */ static inline void megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs) { writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); /* write ~0x00000005 (4 & 1) to the intr mask*/ writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); } /** * megasas_disable_intr_gen2 - Disables interrupt * @regs: MFI register set */ static inline void megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs) { u32 mask = 0xFFFFFFFF; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ 
readl(&regs->outbound_intr_mask); } /** * megasas_read_fw_status_reg_gen2 - returns the current FW status value * @regs: MFI register set */ static u32 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs) { return readl(&(regs)->outbound_scratch_pad); } /** * megasas_clear_interrupt_gen2 - Check & clear interrupt * @regs: MFI register set */ static int megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs) { u32 status; u32 mfiStatus = 0; /* * Check if it is our interrupt */ status = readl(&regs->outbound_intr_status); if (status & MFI_GEN2_ENABLE_INTERRUPT_MASK) { mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; } if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) { mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; } /* * Clear the interrupt by writing back the same value */ if (mfiStatus) writel(status, &regs->outbound_doorbell_clear); /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_status); return mfiStatus; } /** * megasas_fire_cmd_gen2 - Sends command to the FW * @frame_phys_addr : Physical address of cmd * @frame_count : Number of frames for the command * @regs : MFI register set */ static inline void megasas_fire_cmd_gen2(struct megasas_instance *instance, dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs) { unsigned long flags; spin_lock_irqsave(&instance->hba_lock, flags); writel((frame_phys_addr | (frame_count<<1))|1, &(regs)->inbound_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } /** * megasas_adp_reset_gen2 - For controller reset * @regs: MFI register set */ static int megasas_adp_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *reg_set) { u32 retry = 0 ; u32 HostDiag; u32 *seq_offset = &reg_set->seq_offset; u32 *hostdiag_offset = &reg_set->host_diag; if (instance->instancet == &megasas_instance_template_skinny) { seq_offset = &reg_set->fusion_seq_offset; hostdiag_offset = &reg_set->fusion_host_diag; } writel(0, 
seq_offset); writel(4, seq_offset); writel(0xb, seq_offset); writel(2, seq_offset); writel(7, seq_offset); writel(0xd, seq_offset); msleep(1000); HostDiag = (u32)readl(hostdiag_offset); while ( !( HostDiag & DIAG_WRITE_ENABLE) ) { msleep(100); HostDiag = (u32)readl(hostdiag_offset); printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n", retry, HostDiag); if (retry++ >= 100) return 1; } printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset); ssleep(10); HostDiag = (u32)readl(hostdiag_offset); while ( ( HostDiag & DIAG_RESET_ADAPTER) ) { msleep(100); HostDiag = (u32)readl(hostdiag_offset); printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n", retry, HostDiag); if (retry++ >= 1000) return 1; } return 0; } /** * megasas_check_reset_gen2 - For controller reset check * @regs: MFI register set */ static int megasas_check_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { return 1; } return 0; } static struct megasas_instance_template megasas_instance_template_gen2 = { .fire_cmd = megasas_fire_cmd_gen2, .enable_intr = megasas_enable_intr_gen2, .disable_intr = megasas_disable_intr_gen2, .clear_intr = megasas_clear_intr_gen2, .read_fw_status_reg = megasas_read_fw_status_reg_gen2, .adp_reset = megasas_adp_reset_gen2, .check_reset = megasas_check_reset_gen2, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, .init_adapter = megasas_init_adapter_mfi, .build_and_issue_cmd = megasas_build_and_issue_cmd, .issue_dcmd = megasas_issue_dcmd, }; /** * This is the end of set of functions & definitions * specific to gen2 (deviceid : 0x78, 0x79) controllers */ /* * Template added for TB (Fusion) */ extern struct megasas_instance_template megasas_instance_template_fusion; /** * megasas_issue_polled - Issues a polling command * @instance: Adapter soft state * @cmd: Command packet to be issued * * For polling, MFI 
requires the cmd_status to be set to 0xFF before posting. */ int megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) { struct megasas_header *frame_hdr = &cmd->frame->hdr; frame_hdr->cmd_status = 0xFF; frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; /* * Issue the frame using inbound queue port */ instance->instancet->issue_dcmd(instance, cmd); /* * Wait for cmd_status to change */ return wait_and_poll(instance, cmd); } /** * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds * @instance: Adapter soft state * @cmd: Command to be issued * * This function waits on an event for the command to be returned from ISR. * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs * Used to issue ioctl commands. */ static int megasas_issue_blocked_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { cmd->cmd_status = ENODATA; instance->instancet->issue_dcmd(instance, cmd); wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA); return 0; } /** * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd * @instance: Adapter soft state * @cmd_to_abort: Previously issued cmd to be aborted * * MFI firmware can abort previously issued AEN command (automatic event * notification). The megasas_issue_blocked_abort_cmd() issues such abort * cmd and waits for return status. 
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs */ static int megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd_to_abort) { struct megasas_cmd *cmd; struct megasas_abort_frame *abort_fr; cmd = megasas_get_cmd(instance); if (!cmd) return -1; abort_fr = &cmd->frame->abort; /* * Prepare and issue the abort frame */ abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd_status = 0xFF; abort_fr->flags = 0; abort_fr->abort_context = cmd_to_abort->index; abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; abort_fr->abort_mfi_phys_addr_hi = 0; cmd->sync_cmd = 1; cmd->cmd_status = 0xFF; instance->instancet->issue_dcmd(instance, cmd); /* * Wait for this cmd to complete */ wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF); cmd->sync_cmd = 0; megasas_return_cmd(instance, cmd); return 0; } /** * megasas_make_sgl32 - Prepares 32-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returnes -1. */ static int megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) { int i; int sge_count; struct scatterlist *os_sgl; sge_count = scsi_dma_map(scp); BUG_ON(sge_count < 0); if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { mfi_sgl->sge32[i].length = sg_dma_len(os_sgl); mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl); } } return sge_count; } /** * megasas_make_sgl64 - Prepares 64-bit SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returnes -1. 
*/ static int megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) { int i; int sge_count; struct scatterlist *os_sgl; sge_count = scsi_dma_map(scp); BUG_ON(sge_count < 0); if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); } } return sge_count; } /** * megasas_make_sgl_skinny - Prepares IEEE SGL * @instance: Adapter soft state * @scp: SCSI command from the mid-layer * @mfi_sgl: SGL to be filled in * * If successful, this function returns the number of SG elements. Otherwise, * it returnes -1. */ static int megasas_make_sgl_skinny(struct megasas_instance *instance, struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl) { int i; int sge_count; struct scatterlist *os_sgl; sge_count = scsi_dma_map(scp); if (sge_count) { scsi_for_each_sg(scp, os_sgl, sge_count, i) { mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); mfi_sgl->sge_skinny[i].phys_addr = sg_dma_address(os_sgl); mfi_sgl->sge_skinny[i].flag = 0; } } return sge_count; } /** * megasas_get_frame_count - Computes the number of frames * @frame_type : type of frame- io or pthru frame * @sge_count : number of sg elements * * Returns the number of frames required for numnber of sge's (sge_count) */ static u32 megasas_get_frame_count(struct megasas_instance *instance, u8 sge_count, u8 frame_type) { int num_cnt; int sge_bytes; u32 sge_sz; u32 frame_count=0; sge_sz = (IS_DMA64) ? 
sizeof(struct megasas_sge64) : sizeof(struct megasas_sge32); if (instance->flag_ieee) { sge_sz = sizeof(struct megasas_sge_skinny); } /* * Main frame can contain 2 SGEs for 64-bit SGLs and * 3 SGEs for 32-bit SGLs for ldio & * 1 SGEs for 64-bit SGLs and * 2 SGEs for 32-bit SGLs for pthru frame */ if (unlikely(frame_type == PTHRU_FRAME)) { if (instance->flag_ieee == 1) { num_cnt = sge_count - 1; } else if (IS_DMA64) num_cnt = sge_count - 1; else num_cnt = sge_count - 2; } else { if (instance->flag_ieee == 1) { num_cnt = sge_count - 1; } else if (IS_DMA64) num_cnt = sge_count - 2; else num_cnt = sge_count - 3; } if(num_cnt>0){ sge_bytes = sge_sz * num_cnt; frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ; } /* Main frame */ frame_count +=1; if (frame_count > 7) frame_count = 8; return frame_count; } /** * megasas_build_dcdb - Prepares a direct cdb (DCDB) command * @instance: Adapter soft state * @scp: SCSI command * @cmd: Command to be prepared in * * This function prepares CDB commands. These are typcially pass-through * commands to the devices. */ static int megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, struct megasas_cmd *cmd) { u32 is_logical; u32 device_id; u16 flags = 0; struct megasas_pthru_frame *pthru; is_logical = MEGASAS_IS_LOGICAL(scp); device_id = MEGASAS_DEV_INDEX(instance, scp); pthru = (struct megasas_pthru_frame *)cmd->frame; if (scp->sc_data_direction == PCI_DMA_TODEVICE) flags = MFI_FRAME_DIR_WRITE; else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) flags = MFI_FRAME_DIR_READ; else if (scp->sc_data_direction == PCI_DMA_NONE) flags = MFI_FRAME_DIR_NONE; if (instance->flag_ieee == 1) { flags |= MFI_FRAME_IEEE; } /* * Prepare the DCDB frame */ pthru->cmd = (is_logical) ? 
MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; pthru->cmd_status = 0x0; pthru->scsi_status = 0x0; pthru->target_id = device_id; pthru->lun = scp->device->lun; pthru->cdb_len = scp->cmd_len; pthru->timeout = 0; pthru->pad_0 = 0; pthru->flags = flags; pthru->data_xfer_len = scsi_bufflen(scp); memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); /* * If the command is for the tape device, set the * pthru timeout to the os layer timeout value. */ if (scp->device->type == TYPE_TAPE) { if ((scp->request->timeout / HZ) > 0xFFFF) pthru->timeout = 0xFFFF; else pthru->timeout = scp->request->timeout / HZ; } /* * Construct SGL */ if (instance->flag_ieee == 1) { pthru->flags |= MFI_FRAME_SGL64; pthru->sge_count = megasas_make_sgl_skinny(instance, scp, &pthru->sgl); } else if (IS_DMA64) { pthru->flags |= MFI_FRAME_SGL64; pthru->sge_count = megasas_make_sgl64(instance, scp, &pthru->sgl); } else pthru->sge_count = megasas_make_sgl32(instance, scp, &pthru->sgl); if (pthru->sge_count > instance->max_num_sge) { printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n", pthru->sge_count); return 0; } /* * Sense info specific */ pthru->sense_len = SCSI_SENSE_BUFFERSIZE; pthru->sense_buf_phys_addr_hi = 0; pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; /* * Compute the total number of frames this command consumes. FW uses * this number to pull sufficient number of frames from host memory. */ cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count, PTHRU_FRAME); return cmd->frame_count; } /** * megasas_build_ldio - Prepares IOs to logical devices * @instance: Adapter soft state * @scp: SCSI command * @cmd: Command to be prepared * * Frames (and accompanying SGLs) for regular SCSI IOs use this function. 
*/
static int
megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
		   struct megasas_cmd *cmd)
{
	u32 device_id;
	u8 sc = scp->cmnd[0];	/* opcode; bit 1 distinguishes WRITE from READ */
	u16 flags = 0;
	struct megasas_io_frame *ldio;

	device_id = MEGASAS_DEV_INDEX(instance, scp);
	ldio = (struct megasas_io_frame *)cmd->frame;

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		flags = MFI_FRAME_DIR_WRITE;
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		flags = MFI_FRAME_DIR_READ;

	if (instance->flag_ieee == 1) {
		flags |= MFI_FRAME_IEEE;
	}

	/*
	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
	 */
	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
	ldio->cmd_status = 0x0;
	ldio->scsi_status = 0x0;
	ldio->target_id = device_id;
	ldio->timeout = 0;
	ldio->reserved_0 = 0;
	ldio->pad_0 = 0;
	ldio->flags = flags;
	ldio->start_lba_hi = 0;
	/* Byte 1 of a >6-byte CDB carries the access/protection bits */
	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;

	/*
	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
	 */
	if (scp->cmd_len == 6) {
		ldio->lba_count = (u32) scp->cmnd[4];
		ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
		    ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];

		/* 6-byte CDB LBA is only 21 bits wide */
		ldio->start_lba_lo &= 0x1FFFFF;
	}

	/*
	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
	 */
	else if (scp->cmd_len == 10) {
		ldio->lba_count = (u32) scp->cmnd[8] |
		    ((u32) scp->cmnd[7] << 8);
		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
		    ((u32) scp->cmnd[3] << 16) |
		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
	 */
	else if (scp->cmd_len == 12) {
		ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
		    ((u32) scp->cmnd[7] << 16) |
		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];

		ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
		    ((u32) scp->cmnd[3] << 16) |
		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * 16-byte READ(0x88) or WRITE(0x8A) cdb: 64-bit LBA split into
	 * start_lba_hi (bytes 2-5) and start_lba_lo (bytes 6-9).
	 */
	else if (scp->cmd_len == 16) {
		ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
		    ((u32) scp->cmnd[11] << 16) |
		    ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];

		ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
		    ((u32) scp->cmnd[7] << 16) |
		    ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];

		ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
		    ((u32) scp->cmnd[3] << 16) |
		    ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
	}

	/*
	 * Construct SGL
	 */
	if (instance->flag_ieee) {
		ldio->flags |= MFI_FRAME_SGL64;
		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
							  &ldio->sgl);
	} else if (IS_DMA64) {
		ldio->flags |= MFI_FRAME_SGL64;
		ldio->sge_count = megasas_make_sgl64(instance, scp,
						     &ldio->sgl);
	} else
		ldio->sge_count = megasas_make_sgl32(instance, scp,
						     &ldio->sgl);

	/* Returning 0 tells the caller the command could not be built */
	if (ldio->sge_count > instance->max_num_sge) {
		printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
		       ldio->sge_count);
		return 0;
	}

	/*
	 * Sense info specific
	 */
	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
	ldio->sense_buf_phys_addr_hi = 0;
	ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;

	/*
	 * Compute the total number of frames this command consumes. FW uses
	 * this number to pull sufficient number of frames from host memory.
	 */
	cmd->frame_count = megasas_get_frame_count(instance, ldio->sge_count,
						   IO_FRAME);

	return cmd->frame_count;
}

/**
 * megasas_is_ldio -	Checks if the cmd is for logical drive
 * @cmd:		SCSI command
 *
 * Called by megasas_queue_command to find out if the command to be queued
 * is a logical drive command.  Returns 1 only for READ/WRITE opcodes
 * (6/10/12/16 byte) addressed to a logical device; everything else goes
 * down the DCDB (pass-through) path.
 */
inline int megasas_is_ldio(struct scsi_cmnd *cmd)
{
	if (!MEGASAS_IS_LOGICAL(cmd))
		return 0;
	switch (cmd->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_12:
	case WRITE_12:
	case READ_6:
	case WRITE_6:
	case READ_16:
	case WRITE_16:
		return 1;
	default:
		return 0;
	}
}

/**
 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
 *					in FW
 * @instance:				Adapter soft state
 *
 * Diagnostic helper used from the reset path: walks the whole command pool
 * and prints the frame physical address (and, at MEGASAS_DBG_LVL, the SGL)
 * of every command the firmware still owns.
 */
static inline void
megasas_dump_pending_frames(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	int i, n;
	union megasas_sgl *mfi_sgl;
	struct megasas_io_frame *ldio;
	struct megasas_pthru_frame *pthru;
	u32 sgcount;
	u32 max_cmd = instance->max_fw_cmds;

	printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
	printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
	if (IS_DMA64)
		printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
	else
		printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);

	printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		/* Only commands with an attached scsi_cmnd are OS-pending */
		if (!cmd->scmd)
			continue;
		printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
		if (megasas_is_ldio(cmd->scmd)) {
			ldio = (struct megasas_io_frame *)cmd->frame;
			mfi_sgl = &ldio->sgl;
			sgcount = ldio->sge_count;
			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount);
		} else {
			pthru = (struct megasas_pthru_frame *) cmd->frame;
			mfi_sgl = &pthru->sgl;
			sgcount = pthru->sge_count;
			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount);
		}
		/* With debug logging on, also dump each SG element */
		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
			for (n = 0; n < sgcount; n++) {
				if (IS_DMA64)
					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ;
				else
					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ;
			}
		}
		printk(KERN_ERR "\n");
	} /*for max_cmd*/
	printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		if (cmd->sync_cmd == 1) {
			printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
		}
	}
	printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
}

/**
 * megasas_build_and_issue_cmd -	Build an MFI frame for @scmd and fire it
 * @instance:				Adapter soft state
 * @scmd:				SCSI command from the midlayer
 *
 * Returns 0 on success, non-zero on failure (the caller maps any non-zero
 * result to SCSI_MLQUEUE_HOST_BUSY, so build failures are retried by the
 * midlayer rather than failed outright).
 */
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd)
{
	struct megasas_cmd *cmd;
	u32 frame_count;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Logical drive command
	 */
	if (megasas_is_ldio(scmd))
		frame_count = megasas_build_ldio(instance, scmd, cmd);
	else
		frame_count = megasas_build_dcdb(instance, scmd, cmd);

	/* A zero frame count means the builder rejected the command */
	if (!frame_count)
		goto out_return_cmd;

	cmd->scmd = scmd;
	scmd->SCp.ptr = (char *)cmd;	/* back-pointer for completion/abort */

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
				      cmd->frame_count-1, instance->reg_set);
	/*
	 * Check if we have pend cmds to be completed
	 */
	if (poll_mode_io && atomic_read(&instance->fw_outstanding))
		tasklet_schedule(&instance->isr_tasklet);

	return 0;
out_return_cmd:
	megasas_return_cmd(instance, cmd);
	return 1;
}

/**
 * megasas_queue_command -	Queue entry point
 * @scmd:			SCSI command to be queued
 * @done:			Callback entry point
 */
static int
megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
{
	struct megasas_instance *instance;
	unsigned long flags;

	instance = (struct megasas_instance *)
		scmd->device->host->hostdata;

	/* Adapter reset still re-issuing deferred commands: ask for retry */
	if (instance->issuepend_done == 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	spin_lock_irqsave(&instance->hba_lock, flags);
	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	scmd->scsi_done = done;
	scmd->result = 0;

	/* Logical devices exist only at id < MEGASAS_MAX_LD, lun 0 */
	if (MEGASAS_IS_LOGICAL(scmd) &&
	    (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
		scmd->result = DID_BAD_TARGET << 16;
		goto out_done;
	}

	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
		/*
		 * FW takes care of
		 * flush cache on its own
		 * No need to send it down
		 */
		scmd->result = DID_OK << 16;
		goto out_done;
	default:
		break;
	}

	if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
		printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	return 0;

 out_done:
	done(scmd);
	return 0;
}

static DEF_SCSI_QCMD(megasas_queue_command)

/**
 * megasas_lookup_instance -	Map a SCSI host number to its adapter soft state
 * @host_no:			Host number assigned by the SCSI midlayer
 *
 * Returns the matching instance from the global management table, or NULL
 * if no registered adapter owns @host_no.
 */
static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
	int i;

	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
		if ((megasas_mgmt_info.instance[i]) &&
		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
			return megasas_mgmt_info.instance[i];
	}

	return NULL;
}

static int megasas_slave_configure(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance;

	instance = megasas_lookup_instance(sdev->host->host_no);

	/*
	 * Don't export physical disk devices to the disk driver.
	 *
	 * FIXME: Currently we don't export them to the midlayer at all.
	 *	  That will be fixed once LSI engineers have audited the
	 *	  firmware for possible issues.
	 */
	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
	    sdev->type == TYPE_DISK) {
		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		/* Only system PDs are allowed through to the OS */
		if (instance->pd_list[pd_index].driveState ==
		    MR_PD_STATE_SYSTEM) {
			blk_queue_rq_timeout(sdev->request_queue,
					     MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
			return 0;
		}
		return -ENXIO;
	}

	/*
	 * The RAID firmware may require extended timeouts.
	 */
	blk_queue_rq_timeout(sdev->request_queue,
			     MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
	return 0;
}

/**
 * megasas_slave_alloc -	Per-device allocation hook
 * @sdev:			SCSI device being attached
 *
 * Accepts logical drives unconditionally; physical disks are only exposed
 * when the firmware marked them as "system" drives of disk type.
 */
static int megasas_slave_alloc(struct scsi_device *sdev)
{
	u16 pd_index = 0;
	struct megasas_instance *instance ;
	instance = megasas_lookup_instance(sdev->host->host_no);
	if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
	    (sdev->type == TYPE_DISK)) {
		/*
		 * Open the OS scan to the SYSTEM PD
		 */
		pd_index =
			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
		if ((instance->pd_list[pd_index].driveState ==
		     MR_PD_STATE_SYSTEM) &&
		    (instance->pd_list[pd_index].driveType ==
		     TYPE_DISK)) {
			return 0;
		}
		return -ENXIO;
	}
	return 0;
}

/**
 * megaraid_sas_kill_hba -	Stop the adapter
 * @instance:			Adapter soft state
 *
 * Writes MFI_STOP_ADP to the appropriate doorbell register (skinny/Fusion
 * controllers use a different register) so the FW stops processing commands.
 */
void megaraid_sas_kill_hba(struct megasas_instance *instance)
{
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)) {
		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
	} else {
		writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
	}
}

/**
 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
 *					restored to max value
 * @instance:				Adapter soft state
 *
 * The queue depth is throttled to 16 by megasas_reset_timer() when the FW
 * is busy; restore it once the FW has been quiet for 5 seconds and fewer
 * than 17 commands remain outstanding.
 */
void megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
{
	unsigned long flags;
	if (instance->flag & MEGASAS_FW_BUSY
	    && time_after(jiffies, instance->last_time + 5 * HZ)
	    && atomic_read(&instance->fw_outstanding) < 17) {

		spin_lock_irqsave(instance->host->host_lock, flags);
		instance->flag &= ~MEGASAS_FW_BUSY;
		/* Skinny controllers reserve a different internal-cmd count */
		if ((instance->pdev->device ==
		     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
		    (instance->pdev->device ==
		     PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
			instance->host->can_queue =
				instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
		} else
			instance->host->can_queue =
				instance->max_fw_cmds - MEGASAS_INT_CMDS;

		spin_unlock_irqrestore(instance->host->host_lock, flags);
	}
}

/**
 * megasas_complete_cmd_dpc	 -	Returns FW's controller structure
 * @instance_addr:			Address of adapter soft state
 *
 * Tasklet to complete cmds
 */
static void
megasas_complete_cmd_dpc(unsigned long instance_addr)
{
	u32 producer;
	u32 consumer;
	u32 context;
	struct megasas_cmd *cmd;
	struct megasas_instance *instance =
				(struct megasas_instance *)instance_addr;
	unsigned long flags;

	/* If we have already declared adapter dead, do not complete cmds */
	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR )
		return;

	spin_lock_irqsave(&instance->completion_lock, flags);

	producer = *instance->producer;
	consumer = *instance->consumer;

	/* Drain the circular reply queue up to the FW's producer index */
	while (consumer != producer) {
		context = instance->reply_queue[consumer];
		if (context >= instance->max_fw_cmds) {
			printk(KERN_ERR "Unexpected context value %x\n",
				context);
			BUG();
		}

		cmd = instance->cmd_list[context];

		megasas_complete_cmd(instance, cmd, DID_OK);

		consumer++;
		/* Reply queue has max_fw_cmds + 1 slots; wrap around */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}

	*instance->consumer = producer;

	spin_unlock_irqrestore(&instance->completion_lock, flags);

	/*
	 * Check if we can restore can_queue
	 */
	megasas_check_and_restore_queue_depth(instance);
}

static void
megasas_internal_reset_defer_cmds(struct megasas_instance *instance);

static void process_fw_state_change_wq(struct work_struct *work);

/**
 * megasas_do_ocr -	Start online controller reset (OCR)
 * @instance:		Adapter soft state
 *
 * Puts the adapter into the internal-reset state machine: disables
 * interrupts, defers all pending commands and kicks the FW state-change
 * handler synchronously.
 */
void megasas_do_ocr(struct megasas_instance *instance)
{
	/* On 1064R/PERC5/Verde the consumer slot doubles as a reset marker */
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
		*instance->consumer	= MEGASAS_ADPRESET_INPROG_SIGN;
	}
	instance->instancet->disable_intr(instance->reg_set);
	instance->adprecovery	= MEGASAS_ADPRESET_SM_INFAULT;
	instance->issuepend_done = 0;

	atomic_set(&instance->fw_outstanding, 0);
	megasas_internal_reset_defer_cmds(instance);
	process_fw_state_change_wq(&instance->work_init);
}

/**
 * megasas_wait_for_outstanding -	Wait for all outstanding cmds
 * @instance:				Adapter soft state
 *
 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
 * complete all its outstanding commands. Returns error if one or more IOs
 * are pending after this time period. It also marks the controller dead.
 */
static int megasas_wait_for_outstanding(struct megasas_instance *instance)
{
	int i;
	u32 reset_index;
	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
	u8 adprecovery;
	unsigned long flags;
	struct list_head clist_local;
	struct megasas_cmd *reset_cmd;
	u32 fw_state;
	u8 kill_adapter_flag;

	spin_lock_irqsave(&instance->hba_lock, flags);
	adprecovery = instance->adprecovery;
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	/*
	 * An internal reset is already in progress: wait for it to finish,
	 * then flush the deferred-command list ourselves.
	 */
	if (adprecovery != MEGASAS_HBA_OPERATIONAL) {

		INIT_LIST_HEAD(&clist_local);
		spin_lock_irqsave(&instance->hba_lock, flags);
		list_splice_init(&instance->internal_reset_pending_q,
				 &clist_local);
		spin_unlock_irqrestore(&instance->hba_lock, flags);

		printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
		for (i = 0; i < wait_time; i++) {
			msleep(1000);
			spin_lock_irqsave(&instance->hba_lock, flags);
			adprecovery	= instance->adprecovery;
			spin_unlock_irqrestore(&instance->hba_lock, flags);
			if (adprecovery == MEGASAS_HBA_OPERATIONAL)
				break;
		}

		if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
			printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
			spin_lock_irqsave(&instance->hba_lock, flags);
			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
			spin_unlock_irqrestore(&instance->hba_lock, flags);
			return FAILED;
		}

		/* Reset succeeded: fail back OS cmds, re-fire internal cmds */
		reset_index	= 0;
		while (!list_empty(&clist_local)) {
			reset_cmd	= list_entry((&clist_local)->next,
						     struct megasas_cmd, list);
			list_del_init(&reset_cmd->list);
			if (reset_cmd->scmd) {
				reset_cmd->scmd->result = DID_RESET << 16;
				printk(KERN_NOTICE "%d:%p reset [%02x]\n",
					reset_index, reset_cmd,
					reset_cmd->scmd->cmnd[0]);

				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
				megasas_return_cmd(instance, reset_cmd);
			} else if (reset_cmd->sync_cmd) {
				printk(KERN_NOTICE "megasas:%p synch cmds"
						"reset queue\n",
						reset_cmd);

				reset_cmd->cmd_status = ENODATA;
				instance->instancet->fire_cmd(instance,
						reset_cmd->frame_phys_addr,
						0, instance->reg_set);
			} else {
				printk(KERN_NOTICE "megasas: %p unexpected"
					"cmds lst\n",
					reset_cmd);
			}
			reset_index++;
		}

		return SUCCESS;
	}

	/* Normal path: poll until the FW has drained all outstanding cmds */
	for (i = 0; i < wait_time; i++) {
		int outstanding = atomic_read(&instance->fw_outstanding);

		if (!outstanding)
			break;

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
			       "commands to complete\n",i,outstanding);
			/*
			 * Call cmd completion routine. Cmd to be
			 * be completed directly without depending on isr.
			 */
			megasas_complete_cmd_dpc((unsigned long)instance);
		}

		msleep(1000);
	}

	/*
	 * If the FW is faulted and OCR is allowed, attempt an online reset
	 * up to 3 times; kill_adapter_flag == 2 means the FW kept faulting.
	 */
	i = 0;
	kill_adapter_flag = 0;
	do {
		fw_state = instance->instancet->read_fw_status_reg(
					instance->reg_set) & MFI_STATE_MASK;
		if ((fw_state == MFI_STATE_FAULT) &&
		    (instance->disableOnlineCtrlReset == 0)) {
			if (i == 3) {
				kill_adapter_flag = 2;
				break;
			}
			megasas_do_ocr(instance);
			kill_adapter_flag = 1;

			/* wait for 1 secs to let FW finish the pending cmds */
			msleep(1000);
		}
		i++;
	} while (i <= 3);

	/* Commands still stuck but FW never faulted: force an OCR anyway */
	if (atomic_read(&instance->fw_outstanding) &&
	    !kill_adapter_flag) {
		if (instance->disableOnlineCtrlReset == 0) {

			megasas_do_ocr(instance);

			/* wait for 5 secs to let FW finish the pending cmds */
			for (i = 0; i < wait_time; i++) {
				int outstanding =
					atomic_read(&instance->fw_outstanding);
				if (!outstanding)
					return SUCCESS;
				msleep(1000);
			}
		}
	}

	if (atomic_read(&instance->fw_outstanding) ||
	    (kill_adapter_flag == 2)) {
		printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
		/*
		 * Send signal to FW to stop processing any pending cmds.
		 * The controller will be taken offline by the OS now.
		 */
		if ((instance->pdev->device ==
		     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
		    (instance->pdev->device ==
		     PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
			writel(MFI_STOP_ADP,
				&instance->reg_set->doorbell);
		} else {
			writel(MFI_STOP_ADP,
				&instance->reg_set->inbound_doorbell);
		}
		megasas_dump_pending_frames(instance);
		spin_lock_irqsave(&instance->hba_lock, flags);
		instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		return FAILED;
	}

	printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");

	return SUCCESS;
}

/**
 * megasas_generic_reset -	Generic reset routine
 * @scmd:			Mid-layer SCSI command
 *
 * This routine implements a generic reset handler for device, bus and host
 * reset requests. Device, bus and host specific reset handlers can use this
 * function after they do their specific tasks.
 */
static int megasas_generic_reset(struct scsi_cmnd *scmd)
{
	int ret_val;
	struct megasas_instance *instance;

	instance = (struct megasas_instance *)scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
		 scmd->cmnd[0], scmd->retries);

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		printk(KERN_ERR "megasas: cannot recover from previous reset "
		       "failures\n");
		return FAILED;
	}

	ret_val = megasas_wait_for_outstanding(instance);
	if (ret_val == SUCCESS)
		printk(KERN_NOTICE "megasas: reset successful \n");
	else
		printk(KERN_ERR "megasas: failed to do reset\n");

	return ret_val;
}

/**
 * megasas_reset_timer - quiesce the adapter if required
 * @scmd:		scsi cmnd
 *
 * Sets the FW busy flag and reduces the host->can_queue if the
 * cmd has not been completed within the timeout period.
 */
static enum
blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
{
	struct megasas_instance *instance;
	unsigned long flags;

	/* Past twice the default timeout: let the EH escalate to a reset */
	if (time_after(jiffies, scmd->jiffies_at_alloc +
		       (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
		return BLK_EH_NOT_HANDLED;
	}

	instance = (struct megasas_instance *)scmd->device->host->hostdata;
	/*
	 * A timed-out command implies the FW is busy; throttle can_queue
	 * (only once — the flag guards re-entry).  The depth is restored
	 * later by megasas_check_and_restore_queue_depth().
	 */
	if (!(instance->flag & MEGASAS_FW_BUSY)) {
		/* FW is busy, throttle IO */
		spin_lock_irqsave(instance->host->host_lock, flags);

		instance->host->can_queue = 16;
		instance->last_time = jiffies;
		instance->flag |= MEGASAS_FW_BUSY;

		spin_unlock_irqrestore(instance->host->host_lock, flags);
	}
	return BLK_EH_RESET_TIMER;
}

/**
 * megasas_reset_device -	Device reset handler entry point
 */
static int megasas_reset_device(struct scsi_cmnd *scmd)
{
	int ret;

	/*
	 * First wait for all commands to complete
	 */
	ret = megasas_generic_reset(scmd);

	return ret;
}

/**
 * megasas_reset_bus_host -	Bus & host reset handler entry point
 */
static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
{
	int ret;
	struct megasas_instance *instance;
	instance = (struct megasas_instance *)scmd->device->host->hostdata;

	/*
	 * First wait for all commands to complete
	 */
	if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
		ret = megasas_reset_fusion(scmd->device->host);
	else
		ret = megasas_generic_reset(scmd);

	return ret;
}

/**
 * megasas_bios_param - Returns disk geometry for a disk
 * @sdev:		device handle
 * @bdev:		block device
 * @capacity:		drive capacity
 * @geom:		geometry parameters
 */
static int
megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
		 sector_t capacity, int geom[])
{
	int heads;
	int sectors;
	sector_t cylinders;
	unsigned long tmp;
	/* Default heads (64) & sectors (32) */
	heads = 64;
	sectors = 32;
	tmp = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, tmp);

	/*
	 * Handle extended translation size for logical drives > 1Gb
	 */

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		tmp = heads*sectors;
		cylinders = capacity;
		sector_div(cylinders, tmp);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}

static void megasas_aen_polling(struct work_struct *work);

/**
 * megasas_service_aen -	Processes an event notification
 * @instance:			Adapter soft state
 * @cmd:			AEN command completed by the ISR
 *
 * For AEN, driver sends a command down to FW that is held by the FW till an
 * event occurs. When an event of interest occurs, FW completes the command
 * that it was previously holding.
 *
 * This routines sends SIGIO signal to processes that have registered with the
 * driver for AEN.
 */
static void
megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	/*
	 * Don't signal app if it is just an aborted previously registered aen
	 */
	if ((!cmd->abort_aen) && (instance->unload == 0)) {
		spin_lock_irqsave(&poll_aen_lock, flags);
		megasas_poll_wait_aen = 1;
		spin_unlock_irqrestore(&poll_aen_lock, flags);
		wake_up(&megasas_poll_wait);
		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
	}
	else
		cmd->abort_aen = 0;

	instance->aen_cmd = NULL;
	megasas_return_cmd(instance, cmd);

	/* Kick the hotplug worker to rescan devices for this event */
	if ((instance->unload == 0) &&
	    ((instance->issuepend_done == 1))) {
		struct megasas_aen_event *ev;
		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
		if (!ev) {
			printk(KERN_ERR "megasas_service_aen: out of memory\n");
		} else {
			ev->instance = instance;
			instance->ev = ev;
			INIT_WORK(&ev->hotplug_work, megasas_aen_polling);
			schedule_delayed_work(
				(struct delayed_work *)&ev->hotplug_work, 0);
		}
	}
}

/*
 * Scsi host template for megaraid_sas driver
 */
static struct scsi_host_template megasas_template = {

	.module = THIS_MODULE,
	.name = "LSI SAS based MegaRAID driver",
	.proc_name = "megaraid_sas",
	.slave_configure = megasas_slave_configure,
	.slave_alloc = megasas_slave_alloc,
	.queuecommand = megasas_queue_command,
	.eh_device_reset_handler = megasas_reset_device,
	.eh_bus_reset_handler = megasas_reset_bus_host,
	.eh_host_reset_handler = megasas_reset_bus_host,
	.eh_timed_out = megasas_reset_timer,
	.bios_param = megasas_bios_param,
	.use_clustering = ENABLE_CLUSTERING,
};

/**
 * megasas_complete_int_cmd -	Completes an internal command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * The megasas_issue_blocked_cmd() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
megasas_complete_int_cmd(struct megasas_instance *instance,
			 struct megasas_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/* ENODATA is the "still pending" sentinel; map it to success */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}
	wake_up(&instance->int_cmd_wait_q);
}

/**
 * megasas_complete_abort -	Completes aborting a command
 * @instance:			Adapter soft state
 * @cmd:			Cmd that was issued to abort another cmd
 *
 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
 * after it issues an abort on a previously issued command. This function
 * wakes up all functions waiting on the same wait queue.
 */
static void
megasas_complete_abort(struct megasas_instance *instance,
		       struct megasas_cmd *cmd)
{
	if (cmd->sync_cmd) {
		cmd->sync_cmd = 0;
		cmd->cmd_status = 0;
		wake_up(&instance->abort_cmd_wait_q);
	}

	return;
}

/**
 * megasas_complete_cmd -	Completes a command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 * @alt_status:			If non-zero, use this value as status to
 *				SCSI mid-layer instead of the value returned
 *				by the FW. This should be used if caller wants
 *				an alternate status (as in the case of aborted
 *				commands)
 */
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status)
{
	int exception = 0;
	struct megasas_header *hdr = &cmd->frame->hdr;
	unsigned long flags;
	struct fusion_context *fusion = instance->ctrl_context;

	/* flag for the retry reset */
	cmd->retry_for_fw_reset = 0;

	if (cmd->scmd)
		cmd->scmd->SCp.ptr = NULL;

	switch (hdr->cmd) {

	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			megasas_complete_int_cmd(instance, cmd);
			break;
		}
		/* fall through — IO-path completion shared with LD read/write */

	case MFI_CMD_LD_READ:
	case MFI_CMD_LD_WRITE:

		if (alt_status) {
			cmd->scmd->result = alt_status << 16;
			exception = 1;
		}

		if (exception) {

			atomic_dec(&instance->fw_outstanding);

			scsi_dma_unmap(cmd->scmd);
			cmd->scmd->scsi_done(cmd->scmd);
			megasas_return_cmd(instance, cmd);

			break;
		}

		/* Translate the MFI status into a SCSI midlayer result */
		switch (hdr->cmd_status) {

		case MFI_STAT_OK:
			cmd->scmd->result = DID_OK << 16;
			break;

		case MFI_STAT_SCSI_IO_FAILED:
		case MFI_STAT_LD_INIT_IN_PROGRESS:
			cmd->scmd->result =
			    (DID_ERROR << 16) | hdr->scsi_status;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:

			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;

			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
				memset(cmd->scmd->sense_buffer, 0,
				       SCSI_SENSE_BUFFERSIZE);
				memcpy(cmd->scmd->sense_buffer, cmd->sense,
				       hdr->sense_len);

				cmd->scmd->result |= DRIVER_SENSE << 24;
			}

			break;

		case MFI_STAT_LD_OFFLINE:
		case MFI_STAT_DEVICE_NOT_FOUND:
			cmd->scmd->result = DID_BAD_TARGET << 16;
			break;

		default:
			printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
			       hdr->cmd_status);
			cmd->scmd->result = DID_ERROR << 16;
			break;
		}

		atomic_dec(&instance->fw_outstanding);

		scsi_dma_unmap(cmd->scmd);
		cmd->scmd->scsi_done(cmd->scmd);
		megasas_return_cmd(instance, cmd);

		break;

	case MFI_CMD_SMP:
	case
	     MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			spin_lock_irqsave(instance->host->host_lock, flags);
			if (cmd->frame->hdr.cmd_status != 0) {
				if (cmd->frame->hdr.cmd_status !=
				    MFI_STAT_NOT_FOUND)
					printk(KERN_WARNING "megasas: map sync"
					       "failed, status = 0x%x.\n",
					       cmd->frame->hdr.cmd_status);
				else {
					megasas_return_cmd(instance, cmd);
					spin_unlock_irqrestore(
						instance->host->host_lock,
						flags);
					break;
				}
			} else
				instance->map_id++;
			megasas_return_cmd(instance, cmd);
			/* Fast-path IO stays enabled only with a valid map */
			if (MR_ValidateMapInfo(
				fusion->ld_map[(instance->map_id & 1)],
				fusion->load_balance_info))
				fusion->fast_path_io = 1;
			else
				fusion->fast_path_io = 0;
			megasas_sync_map_info(instance);
			spin_unlock_irqrestore(instance->host->host_lock,
					       flags);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			spin_lock_irqsave(&poll_aen_lock, flags);
			megasas_poll_wait_aen = 0;
			spin_unlock_irqrestore(&poll_aen_lock, flags);
		}

		/*
		 * See if got an event notification
		 */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			megasas_service_aen(instance, cmd);
		else
			megasas_complete_int_cmd(instance, cmd);

		break;

	case MFI_CMD_ABORT:
		/*
		 * Cmd issued to abort another cmd returned
		 */
		megasas_complete_abort(instance, cmd);
		break;

	default:
		printk("megasas: Unknown command completed! [0x%X]\n",
		       hdr->cmd);
		break;
	}
}

/**
 * megasas_issue_pending_cmds_again -	issue all pending cmds
 *					in FW again because of the fw reset
 * @instance:				Adapter soft state
 *
 * Called after a successful internal reset: re-fires the deferred internal
 * and SCSI commands, kills the HBA if any command has already been retried
 * 3 times, and finally re-registers the AEN.
 */
static inline void
megasas_issue_pending_cmds_again(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	struct list_head clist_local;
	union megasas_evt_class_locale class_locale;
	unsigned long flags;
	u32 seq_num;

	INIT_LIST_HEAD(&clist_local);
	spin_lock_irqsave(&instance->hba_lock, flags);
	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	while (!list_empty(&clist_local)) {
		cmd	= list_entry((&clist_local)->next,
				     struct megasas_cmd, list);
		list_del_init(&cmd->list);

		if (cmd->sync_cmd || cmd->scmd) {
			printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
				"detected to be pending while HBA reset.\n",
					cmd, cmd->scmd, cmd->sync_cmd);

			cmd->retry_for_fw_reset++;

			/* Give up after the same cmd survives 3 resets */
			if (cmd->retry_for_fw_reset == 3) {
				printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
					"was tried multiple times during reset."
"Shutting down the HBA\n", cmd, cmd->scmd, cmd->sync_cmd); megaraid_sas_kill_hba(instance); instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; return; } } if (cmd->sync_cmd == 1) { if (cmd->scmd) { printk(KERN_NOTICE "megaraid_sas: unexpected" "cmd attached to internal command!\n"); } printk(KERN_NOTICE "megasas: %p synchronous cmd" "on the internal reset queue," "issue it again.\n", cmd); cmd->cmd_status = ENODATA; instance->instancet->fire_cmd(instance, cmd->frame_phys_addr , 0, instance->reg_set); } else if (cmd->scmd) { printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]" "detected on the internal queue, issue again.\n", cmd, cmd->scmd->cmnd[0]); atomic_inc(&instance->fw_outstanding); instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, cmd->frame_count-1, instance->reg_set); } else { printk(KERN_NOTICE "megasas: %p unexpected cmd on the" "internal reset defer list while re-issue!!\n", cmd); } } if (instance->aen_cmd) { printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n"); megasas_return_cmd(instance, instance->aen_cmd); instance->aen_cmd = NULL; } /* * Initiate AEN (Asynchronous Event Notification) */ seq_num = instance->last_seq_num; class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; megasas_register_aen(instance, seq_num, class_locale.word); } /** * Move the internal reset pending commands to a deferred queue. * * We move the commands pending at internal reset time to a * pending queue. This queue would be flushed after successful * completion of the internal reset sequence. if the internal reset * did not complete in time, the kernel reset handler would flush * these commands. 
**/ static void megasas_internal_reset_defer_cmds(struct megasas_instance *instance) { struct megasas_cmd *cmd; int i; u32 max_cmd = instance->max_fw_cmds; u32 defer_index; unsigned long flags; defer_index = 0; spin_lock_irqsave(&instance->cmd_pool_lock, flags); for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; if (cmd->sync_cmd == 1 || cmd->scmd) { printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p" "on the defer queue as internal\n", defer_index, cmd, cmd->sync_cmd, cmd->scmd); if (!list_empty(&cmd->list)) { printk(KERN_NOTICE "megaraid_sas: ERROR while" " moving this cmd:%p, %d %p, it was" "discovered on some list?\n", cmd, cmd->sync_cmd, cmd->scmd); list_del_init(&cmd->list); } defer_index++; list_add_tail(&cmd->list, &instance->internal_reset_pending_q); } } spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); } static void process_fw_state_change_wq(struct work_struct *work) { struct megasas_instance *instance = container_of(work, struct megasas_instance, work_init); u32 wait; unsigned long flags; if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) { printk(KERN_NOTICE "megaraid_sas: error, recovery st %x \n", instance->adprecovery); return ; } if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) { printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault" "state, restarting it...\n"); instance->instancet->disable_intr(instance->reg_set); atomic_set(&instance->fw_outstanding, 0); atomic_set(&instance->fw_reset_no_pci_access, 1); instance->instancet->adp_reset(instance, instance->reg_set); atomic_set(&instance->fw_reset_no_pci_access, 0 ); printk(KERN_NOTICE "megaraid_sas: FW restarted successfully," "initiating next stage...\n"); printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine," "state 2 starting...\n"); /*waitting for about 20 second before start the second init*/ for (wait = 0; wait < 30; wait++) { msleep(1000); } if (megasas_transition_to_ready(instance)) { printk(KERN_NOTICE "megaraid_sas:adapter not ready\n"); 
			megaraid_sas_kill_hba(instance);
			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
			return ;
		}

		/* Re-arm the producer/consumer indices for the reply queue */
		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
		    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
		    (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
			) {
			*instance->consumer = *instance->producer;
		} else {
			*instance->consumer = 0;
			*instance->producer = 0;
		}

		megasas_issue_init_mfi(instance);

		spin_lock_irqsave(&instance->hba_lock, flags);
		instance->adprecovery	= MEGASAS_HBA_OPERATIONAL;
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		instance->instancet->enable_intr(instance->reg_set);

		megasas_issue_pending_cmds_again(instance);
		instance->issuepend_done = 1;
	}
	return ;
}

/**
 * megasas_deplete_reply_queue -	Processes all completed commands
 * @instance:				Adapter soft state
 * @alt_status:				Alternate status to be returned to
 *					SCSI mid-layer instead of the status
 *					returned by the FW
 * Note: this must be called with hba lock held
 */
static int
megasas_deplete_reply_queue(struct megasas_instance *instance,
			    u8 alt_status)
{
	u32 mfiStatus;
	u32 fw_state;

	/* A reset is in progress; the interrupt is ours but is a no-op */
	if ((mfiStatus = instance->instancet->check_reset(instance,
					instance->reg_set)) == 1) {
		return IRQ_HANDLED;
	}

	if ((mfiStatus = instance->instancet->clear_intr(
						instance->reg_set)
						) == 0) {
		/* Hardware may not set outbound_intr_status in MSI-X mode */
		if (!instance->msi_flag)
			return IRQ_NONE;
	}

	instance->mfiStatus = mfiStatus;

	/* FW signalled a state change: check for a fault and start OCR */
	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
		fw_state = instance->instancet->read_fw_status_reg(
				instance->reg_set) & MFI_STATE_MASK;

		if (fw_state != MFI_STATE_FAULT) {
			printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
						fw_state);
		}

		if ((fw_state == MFI_STATE_FAULT) &&
		    (instance->disableOnlineCtrlReset == 0)) {
			printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");

			if ((instance->pdev->device ==
					PCI_DEVICE_ID_LSI_SAS1064R) ||
			    (instance->pdev->device ==
					PCI_DEVICE_ID_DELL_PERC5) ||
			    (instance->pdev->device ==
					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {

				*instance->consumer =
					MEGASAS_ADPRESET_INPROG_SIGN;
			}


			instance->instancet->disable_intr(instance->reg_set);
			instance->adprecovery	= MEGASAS_ADPRESET_SM_INFAULT;
			instance->issuepend_done = 0;

			atomic_set(&instance->fw_outstanding, 0);
			megasas_internal_reset_defer_cmds(instance);

			printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
					fw_state, instance->adprecovery);

			/* Heavy lifting is deferred to process_fw_state_change_wq */
			schedule_work(&instance->work_init);
			return IRQ_HANDLED;

		} else {
			printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
				fw_state, instance->disableOnlineCtrlReset);
		}
	}

	/* Normal completion path: drain the reply queue in tasklet context */
	tasklet_schedule(&instance->isr_tasklet);
	return IRQ_HANDLED;
}

/**
 * megasas_isr - isr entry point
 */
static irqreturn_t megasas_isr(int irq, void *devp)
{
	struct megasas_instance *instance;
	unsigned long flags;
	irqreturn_t rc;

	/* PCI access is fenced off while adp_reset is running */
	if (atomic_read(
		&(((struct megasas_instance *)devp)->fw_reset_no_pci_access)))
		return IRQ_HANDLED;

	instance = (struct megasas_instance *)devp;

	spin_lock_irqsave(&instance->hba_lock, flags);
	rc =  megasas_deplete_reply_queue(instance, DID_OK);
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	return rc;
}

/**
 * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 *
 * During the initialization, FW passes can potentially be in any one of
 * several possible states. If the FW in operational, waiting-for-handshake
 * states, driver must take steps to bring it to ready state. Otherwise, it
 * has to wait for the ready state.
 */
int
megasas_transition_to_ready(struct megasas_instance* instance)
{
	int i;
	u8 max_wait;
	u32 fw_state;
	u32 cur_state;
	u32 abs_state, curr_abs_state;

	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;

	if (fw_state != MFI_STATE_READY)
		printk(KERN_INFO "megasas: Waiting for FW to come to ready"
		       " state\n");

	while (fw_state != MFI_STATE_READY) {

		/* abs_state keeps the full (unmasked) status word so any
		 * register change, not just a state change, ends the wait */
		abs_state =
		instance->instancet->read_fw_status_reg(instance->reg_set);

		/* NOTE(review): cur_state is assigned in every case below but
		 * never read afterwards — appears vestigial. */
		switch (fw_state) {

		case MFI_STATE_FAULT:

			printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FAULT;
			break;

		case MFI_STATE_WAIT_HANDSHAKE:
			/*
			 * Set the CLR bit in inbound doorbell
			 * (skinny/Fusion boards expose it as "doorbell",
			 * older boards as "inbound_doorbell")
			 */
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_FUSION)) {
				writel(
				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
				  &instance->reg_set->doorbell);
			} else {
				writel(
				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
					&instance->reg_set->inbound_doorbell);
			}

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;

		case MFI_STATE_BOOT_MESSAGE_PENDING:
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_FUSION)) {
				writel(MFI_INIT_HOTPLUG,
				       &instance->reg_set->doorbell);
			} else
				writel(MFI_INIT_HOTPLUG,
					&instance->reg_set->inbound_doorbell);

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;

		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10 secs
			 */
			instance->instancet->disable_intr(instance->reg_set);
			if ((instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
				(instance->pdev->device ==
				PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
				(instance->pdev->device
					== PCI_DEVICE_ID_LSI_FUSION)) {
				writel(MFI_RESET_FLAGS,
					&instance->reg_set->doorbell);
				/* Fusion: poll doorbell bit 0 for up to 10s
				 * until the FW acknowledges the reset */
				if (instance->pdev->device ==
					PCI_DEVICE_ID_LSI_FUSION) {
					for (i = 0; i < (10 * 1000); i += 20) {
						if (readl(
							    &instance->
							    reg_set->
							    doorbell) & 1)
							msleep(20);
						else
							break;
					}
				}
			} else
				writel(MFI_RESET_FLAGS,
					&instance->reg_set->inbound_doorbell);

			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_OPERATIONAL;
			break;

		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2 seconds
			 */
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_UNDEFINED;
			break;

		case MFI_STATE_BB_INIT:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_BB_INIT;
			break;

		case MFI_STATE_FW_INIT:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FW_INIT;
			break;

		case MFI_STATE_FW_INIT_2:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FW_INIT_2;
			break;

		case MFI_STATE_DEVICE_SCAN:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;

		case MFI_STATE_FLUSH_CACHE:
			max_wait = MEGASAS_RESET_WAIT_TIME;
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;

		default:
			printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
			       fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 * (poll the full status word once per millisecond)
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
					MFI_STATE_MASK ;
		curr_abs_state =
		instance->instancet->read_fw_status_reg(instance->reg_set);

			if (abs_state == curr_abs_state) {
				msleep(1);
			} else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			printk(KERN_DEBUG "FW state [%d] hasn't changed "
			       "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	printk(KERN_INFO "megasas: FW now in Ready state\n");

	return 0;
}

/**
 * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
 * @instance:				Adapter soft state
 */
static void megasas_teardown_frame_pool(struct megasas_instance *instance)
{
	int i;
	u32 max_cmd = instance->max_mfi_cmds;
	struct megasas_cmd *cmd;

	if (!instance->frame_dma_pool)
		return;

	/*
	 * Return all frames to pool
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		if (cmd->frame)
			pci_pool_free(instance->frame_dma_pool, cmd->frame,
				      cmd->frame_phys_addr);

		if (cmd->sense)
			pci_pool_free(instance->sense_dma_pool, cmd->sense,
				      cmd->sense_phys_addr);
	}

	/*
	 * Now destroy the pool itself
	 */
	pci_pool_destroy(instance->frame_dma_pool);
	pci_pool_destroy(instance->sense_dma_pool);

	instance->frame_dma_pool = NULL;
	instance->sense_dma_pool = NULL;
}

/**
 * megasas_create_frame_pool -	Creates DMA pool for cmd frames
 * @instance:			Adapter soft state
 *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling MFI frame and the SG list that immediately follows the frame. This
 * function creates those DMA memory buffers for each command packet by using
 * PCI pool facility.
 */
static int megasas_create_frame_pool(struct megasas_instance *instance)
{
	int i;
	u32 max_cmd;
	u32 sge_sz;
	u32 sgl_sz;
	u32 total_sz;
	u32 frame_count;
	struct megasas_cmd *cmd;

	max_cmd = instance->max_mfi_cmds;

	/*
	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
	 */
	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
	    sizeof(struct megasas_sge32);

	if (instance->flag_ieee) {
		sge_sz = sizeof(struct megasas_sge_skinny);
	}

	/*
	 * Calculated the number of 64byte frames required for SGL
	 */
	sgl_sz = sge_sz * instance->max_num_sge;
	frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
	/* NOTE(review): the computed frame_count is immediately overridden
	 * with a fixed 15 (16 frames / 1KB total after the increment below).
	 * Presumably intentional for a fixed frame-buffer size — confirm
	 * before "fixing". */
	frame_count = 15;
	/*
	 * We need one extra frame for the MFI command
	 */
	frame_count++;

	total_sz = MEGAMFI_FRAME_SIZE * frame_count;
	/*
	 * Use DMA pool facility provided by PCI layer
	 */
	instance->frame_dma_pool = pci_pool_create("megasas frame pool",
						   instance->pdev, total_sz, 64,
						   0);

	if (!instance->frame_dma_pool) {
		printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
		return -ENOMEM;
	}

	instance->sense_dma_pool = pci_pool_create("megasas sense pool",
						   instance->pdev, 128, 4, 0);

	if (!instance->sense_dma_pool) {
		printk(KERN_DEBUG "megasas: failed to setup sense pool\n");

		pci_pool_destroy(instance->frame_dma_pool);
		instance->frame_dma_pool = NULL;

		return -ENOMEM;
	}

	/*
	 * Allocate and attach a frame to each of the commands in cmd_list.
	 * By making cmd->index as the context instead of the &cmd, we can
	 * always use 32bit context regardless of the architecture
	 */
	for (i = 0; i < max_cmd; i++) {

		cmd = instance->cmd_list[i];

		cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
					    GFP_KERNEL, &cmd->frame_phys_addr);

		cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
					    GFP_KERNEL, &cmd->sense_phys_addr);

		/*
		 * megasas_teardown_frame_pool() takes care of freeing
		 * whatever has been allocated
		 */
		if (!cmd->frame || !cmd->sense) {
			printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
			megasas_teardown_frame_pool(instance);
			return -ENOMEM;
		}

		memset(cmd->frame, 0, total_sz);
		cmd->frame->io.context = cmd->index;
		cmd->frame->io.pad_0 = 0;
	}

	return 0;
}

/**
 * megasas_free_cmds -	Free all the cmds in the free cmd pool
 * @instance:		Adapter soft state
 */
void megasas_free_cmds(struct megasas_instance *instance)
{
	int i;

	/* First free the MFI frame pool */
	megasas_teardown_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	for (i = 0; i < instance->max_mfi_cmds; i++)

		kfree(instance->cmd_list[i]);

	/* Free the cmd_list buffer itself */
	kfree(instance->cmd_list);
	instance->cmd_list = NULL;

	INIT_LIST_HEAD(&instance->cmd_pool);
}

/**
 * megasas_alloc_cmds -	Allocates the command packets
 * @instance:		Adapter soft state
 *
 * Each command that is issued to the FW, whether IO commands from the OS or
 * internal commands like IOCTLs, are wrapped in local data structure called
 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
 * the FW.
 *
 * Each frame has a 32-bit field called context (tag). This context is used
 * to get back the megasas_cmd from the frame when a frame gets completed in
 * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the mininum. We always use 32 bit integers for the context. In
 * this driver, the 32 bit values are the indices into an array cmd_list.
* This array is used only to look up the megasas_cmd given the context. The * free commands themselves are maintained in a linked list called cmd_pool. */ int megasas_alloc_cmds(struct megasas_instance *instance) { int i; int j; u32 max_cmd; struct megasas_cmd *cmd; max_cmd = instance->max_mfi_cmds; /* * instance->cmd_list is an array of struct megasas_cmd pointers. * Allocate the dynamic array first and then allocate individual * commands. */ instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL); if (!instance->cmd_list) { printk(KERN_DEBUG "megasas: out of memory\n"); return -ENOMEM; } memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd); for (i = 0; i < max_cmd; i++) { instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL); if (!instance->cmd_list[i]) { for (j = 0; j < i; j++) kfree(instance->cmd_list[j]); kfree(instance->cmd_list); instance->cmd_list = NULL; return -ENOMEM; } } /* * Add all the commands to command pool (instance->cmd_pool) */ for (i = 0; i < max_cmd; i++) { cmd = instance->cmd_list[i]; memset(cmd, 0, sizeof(struct megasas_cmd)); cmd->index = i; cmd->scmd = NULL; cmd->instance = instance; list_add_tail(&cmd->list, &instance->cmd_pool); } /* * Create a frame pool and assign one frame to each cmd */ if (megasas_create_frame_pool(instance)) { printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n"); megasas_free_cmds(instance); } return 0; } /* * megasas_get_pd_list_info - Returns FW's pd_list structure * @instance: Adapter soft state * @pd_list: pd_list structure * * Issues an internal command (DCMD) to get the FW's controller PD * list structure. This information is mainly used to find out SYSTEM * supported by the FW. 
 */
static int
megasas_get_pd_list(struct megasas_instance *instance)
{
	int ret = 0, pd_index = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_PD_LIST *ci;
	struct MR_PD_ADDRESS *pd_addr;
	dma_addr_t ci_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	/* Coherent DMA buffer the FW fills with the PD list */
	ci = pci_alloc_consistent(instance->pdev,
		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);

	if (!ci) {
		printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	/* NOTE(review): only the first struct is zeroed although
	 * MEGASAS_MAX_PD structs were allocated — the FW overwrites the
	 * buffer anyway, but confirm this is intentional. */
	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
	dcmd->mbox.b[1] = 0;
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);
	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST);

	if (!megasas_issue_polled(instance, cmd)) {
		ret = 0;
	} else {
		ret = -1;
	}

	/*
	 * the following function will get the instance PD LIST.
	 */

	pd_addr = ci->addr;

	if ( ret == 0 &&
		(ci->count <
		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {

		memset(instance->pd_list, 0,
			MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));

		/* Cache each reported PD, keyed by its device id */
		for (pd_index = 0; pd_index < ci->count; pd_index++) {

			instance->pd_list[pd_addr->deviceId].tid =
				pd_addr->deviceId;
			instance->pd_list[pd_addr->deviceId].driveType =
				pd_addr->scsiDevType;
			instance->pd_list[pd_addr->deviceId].driveState =
				MR_PD_STATE_SYSTEM;
			pd_addr++;
		}
	}

	pci_free_consistent(instance->pdev,
				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
				ci, ci_h);
	megasas_return_cmd(instance, cmd);

	return ret;
}

/*
 * megasas_get_ld_list_info -	Returns FW's ld_list structure
 * @instance:			Adapter soft state
 * @ld_list:			ld_list structure
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure.  This information is mainly used to find out SYSTEM
 * supported by the FW.
 */
static int
megasas_get_ld_list(struct megasas_instance *instance)
{
	int ret = 0, ld_index = 0, ids = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct MR_LD_LIST *ci;
	dma_addr_t ci_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	ci = pci_alloc_consistent(instance->pdev,
				sizeof(struct MR_LD_LIST),
				&ci_h);

	if (!ci) {
		printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
	dcmd->opcode = MR_DCMD_LD_GET_LIST;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
	dcmd->pad_0 = 0;

	if (!megasas_issue_polled(instance, cmd)) {
		ret = 0;
	} else {
		ret = -1;
	}

	/* the following function will get the instance PD LIST */

	if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) {
		/* 0xff marks "no LD" in the id map */
		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);

		for (ld_index = 0; ld_index < ci->ldCount; ld_index++) {
			if (ci->ldList[ld_index].state != 0) {
				ids = ci->ldList[ld_index].ref.targetId;
				instance->ld_ids[ids] =
					ci->ldList[ld_index].ref.targetId;
			}
		}
	}

	pci_free_consistent(instance->pdev,
				sizeof(struct MR_LD_LIST),
				ci,
				ci_h);

	megasas_return_cmd(instance, cmd);
	return ret;
}

/**
 * megasas_get_controller_info -	Returns FW's controller structure
 * @instance:				Adapter soft state
 * @ctrl_info:				Controller information structure
 *
 * Issues an internal command (DCMD) to get the FW's controller structure.
 * This information is mainly used to find out the maximum IO transfer per
 * command supported by the FW.
 */
static int
megasas_get_ctrl_info(struct megasas_instance *instance,
		      struct megasas_ctrl_info *ctrl_info)
{
	int ret = 0;
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	struct megasas_ctrl_info *ci;
	dma_addr_t ci_h = 0;

	cmd = megasas_get_cmd(instance);

	if (!cmd) {
		printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
		return -ENOMEM;
	}

	dcmd = &cmd->frame->dcmd;

	ci = pci_alloc_consistent(instance->pdev,
				  sizeof(struct megasas_ctrl_info), &ci_h);

	if (!ci) {
		printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
		megasas_return_cmd(instance, cmd);
		return -ENOMEM;
	}

	memset(ci, 0, sizeof(*ci));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = ci_h;
	dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);

	if (!megasas_issue_polled(instance, cmd)) {
		ret = 0;
		/* Copy out of the DMA buffer only on success */
		memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
	} else {
		ret = -1;
	}

	pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
			    ci, ci_h);

	megasas_return_cmd(instance, cmd);
	return ret;
}

/**
* megasas_issue_init_mfi - Initializes the FW * @instance: Adapter soft state * * Issues the INIT MFI cmd */ static int megasas_issue_init_mfi(struct megasas_instance *instance) { u32 context; struct megasas_cmd *cmd; struct megasas_init_frame *init_frame; struct megasas_init_queue_info *initq_info; dma_addr_t init_frame_h; dma_addr_t initq_info_h; /* * Prepare a init frame. Note the init frame points to queue info * structure. Each frame has SGL allocated after first 64 bytes. For * this frame - since we don't need any SGL - we use SGL's space as * queue info structure * * We will not get a NULL command below. We just created the pool. */ cmd = megasas_get_cmd(instance); init_frame = (struct megasas_init_frame *)cmd->frame; initq_info = (struct megasas_init_queue_info *) ((unsigned long)init_frame + 64); init_frame_h = cmd->frame_phys_addr; initq_info_h = init_frame_h + 64; context = init_frame->context; memset(init_frame, 0, MEGAMFI_FRAME_SIZE); memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); init_frame->context = context; initq_info->reply_queue_entries = instance->max_fw_cmds + 1; initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h; initq_info->producer_index_phys_addr_lo = instance->producer_h; initq_info->consumer_index_phys_addr_lo = instance->consumer_h; init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; init_frame->queue_info_new_phys_addr_lo = initq_info_h; init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); /* * disable the intr before firing the init frame to FW */ instance->instancet->disable_intr(instance->reg_set); /* * Issue the init frame in polled mode */ if (megasas_issue_polled(instance, cmd)) { printk(KERN_ERR "megasas: Failed to init firmware\n"); megasas_return_cmd(instance, cmd); goto fail_fw_init; } megasas_return_cmd(instance, cmd); return 0; fail_fw_init: return -EINVAL; } /** * megasas_start_timer - Initializes a timer object * @instance: Adapter soft state * @timer: timer 
 *			object to be initialized
 * @fn:			timer function
 * @interval:		time interval between timer function call
 */
static inline void
megasas_start_timer(struct megasas_instance *instance,
			struct timer_list *timer,
			void *fn, unsigned long interval)
{
	init_timer(timer);
	timer->expires = jiffies + interval;
	/* Timer callback receives the instance pointer as its argument */
	timer->data = (unsigned long)instance;
	timer->function = fn;
	add_timer(timer);
}

/**
 * megasas_io_completion_timer - Timer fn
 * @instance_addr:	Address of adapter soft state
 *
 * Schedules tasklet for cmd completion
 * if poll_mode_io is set
 */
static void
megasas_io_completion_timer(unsigned long instance_addr)
{
	struct megasas_instance *instance =
			(struct megasas_instance *)instance_addr;

	if (atomic_read(&instance->fw_outstanding))
		tasklet_schedule(&instance->isr_tasklet);

	/* Restart timer */
	if (poll_mode_io)
		mod_timer(&instance->io_completion_timer,
			jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
}

/*
 * MFI-specific adapter init: read operational limits from the FW status
 * register, allocate the command pool and reply queue, and send the INIT
 * frame.  Returns 0 on success, 1 on failure.
 */
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *reg_set;
	u32 context_sz;
	u32 reply_q_sz;

	reg_set = instance->reg_set;

	/*
	 * Get various operational parameters from status register
	 * (low 16 bits = max FW commands)
	 */
	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	instance->max_fw_cmds = instance->max_fw_cmds-1;
	instance->max_mfi_cmds = instance->max_fw_cmds;
	/* Bits 16-23 = max SG elements per command */
	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
					0x10;
	/*
	 * Create a pool of commands
	 */
	if (megasas_alloc_cmds(instance))
		goto fail_alloc_cmds;

	/*
	 * Allocate memory for reply queue. Length of reply queue should
	 * be _one_ more than the maximum commands handled by the firmware.
	 *
	 * Note: When FW completes commands, it places corresponding contex
	 * values in this circular reply queue. This circular queue is a fairly
	 * typical producer-consumer queue. FW is the producer (of completed
	 * commands) and the driver is the consumer.
	 */
	context_sz = sizeof(u32);
	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);

	instance->reply_queue = pci_alloc_consistent(instance->pdev,
						     reply_q_sz,
						     &instance->reply_queue_h);

	if (!instance->reply_queue) {
		printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
		goto fail_reply_queue;
	}

	if (megasas_issue_init_mfi(instance))
		goto fail_fw_init;

	/* Bit 26 of the status register advertises IEEE SGL support */
	instance->fw_support_ieee = 0;
	instance->fw_support_ieee =
		(instance->instancet->read_fw_status_reg(reg_set) &
		0x04000000);

	printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d",
		instance->fw_support_ieee);

	if (instance->fw_support_ieee)
		instance->flag_ieee = 1;

	return 0;

fail_fw_init:

	pci_free_consistent(instance->pdev, reply_q_sz,
			    instance->reply_queue, instance->reply_queue_h);

fail_reply_queue:
	megasas_free_cmds(instance);

fail_alloc_cmds:
	return 1;
}

/**
 * megasas_init_fw -	Initializes the FW
 * @instance:		Adapter soft state
 *
 * This is the main function for initializing firmware
 */

static int megasas_init_fw(struct megasas_instance *instance)
{
	u32 max_sectors_1;
	u32 max_sectors_2;
	u32 tmp_sectors, msix_enable;
	struct megasas_register_set __iomem *reg_set;
	struct megasas_ctrl_info *ctrl_info;
	unsigned long bar_list;

	/* Find first memory bar */
	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
	/* NOTE(review): find_first_bit()'s second argument is a size in
	 * BITS; sizeof(unsigned long) (= 4 or 8) only scans the first few
	 * bits.  Works because PCI BARs are 0-5, but fragile — consider
	 * BITS_PER_LONG. */
	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
	instance->base_addr = pci_resource_start(instance->pdev, instance->bar);
	if (pci_request_selected_regions(instance->pdev, instance->bar,
					 "megasas: LSI")) {
		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
		return -EBUSY;
	}

	instance->reg_set = ioremap_nocache(instance->base_addr, 8192);

	if (!instance->reg_set) {
		printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
		goto fail_ioremap;
	}

	reg_set = instance->reg_set;

	/* Pick the per-chip instance template (register access, ISR, etc.) */
	switch (instance->pdev->device) {
	case PCI_DEVICE_ID_LSI_FUSION:
		instance->instancet = &megasas_instance_template_fusion;
		break;
	case PCI_DEVICE_ID_LSI_SAS1078R:
	case PCI_DEVICE_ID_LSI_SAS1078DE:
		instance->instancet = &megasas_instance_template_ppc;
		break;
	case PCI_DEVICE_ID_LSI_SAS1078GEN2:
	case PCI_DEVICE_ID_LSI_SAS0079GEN2:
		instance->instancet = &megasas_instance_template_gen2;
		break;
	case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
	case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
		instance->instancet = &megasas_instance_template_skinny;
		break;
	case PCI_DEVICE_ID_LSI_SAS1064R:
	case PCI_DEVICE_ID_DELL_PERC5:
	default:
		instance->instancet = &megasas_instance_template_xscale;
		break;
	}

	/*
	 * We expect the FW state to be READY
	 */
	if (megasas_transition_to_ready(instance))
		goto fail_ready_state;

	/* Check if MSI-X is supported while in ready state */
	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
		       0x4000000) >> 0x1a;
	if (msix_enable && !msix_disable &&
	    !pci_enable_msix(instance->pdev, &instance->msixentry, 1))
		instance->msi_flag = 1;

	/* Get operational params, sge flags, send init cmd to controller */
	if (instance->instancet->init_adapter(instance))
		goto fail_init_adapter;

	printk(KERN_ERR "megasas: INIT adapter done\n");

	/** for passthrough
	 * the following function will get the PD LIST.
	 */

	memset(instance->pd_list, 0 ,
		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
	megasas_get_pd_list(instance);

	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
	megasas_get_ld_list(instance);

	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);

	/*
	 * Compute the max allowed sectors per IO: The controller info has two
	 * limits on max sectors. Driver should use the minimum of these two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information
	 * to calculate max_sectors_1. So the number ended up as zero always.
	 */
	tmp_sectors = 0;
	if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {

		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
			ctrl_info->max_strips_per_io;
		max_sectors_2 = ctrl_info->max_request_size;

		tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
		instance->disableOnlineCtrlReset =
		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
	}

	instance->max_sectors_per_req = instance->max_num_sge *
						PAGE_SIZE / 512;
	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
		instance->max_sectors_per_req = tmp_sectors;

	kfree(ctrl_info);

	/*
	 * Setup tasklet for cmd completion
	 */
	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
		(unsigned long)instance);

	/* Initialize the cmd completion timer */
	if (poll_mode_io)
		megasas_start_timer(instance, &instance->io_completion_timer,
				megasas_io_completion_timer,
				MEGASAS_COMPLETION_TIMER_INTERVAL);
	return 0;

fail_init_adapter:
fail_ready_state:
	iounmap(instance->reg_set);

fail_ioremap:
	pci_release_selected_regions(instance->pdev, instance->bar);

	return -EINVAL;
}

/**
 * megasas_release_mfi -	Reverses the FW initialization
 * @intance:			Adapter soft state
 */
static void megasas_release_mfi(struct megasas_instance *instance)
{
	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);

	if (instance->reply_queue)
		pci_free_consistent(instance->pdev, reply_q_sz,
			    instance->reply_queue, instance->reply_queue_h);

	megasas_free_cmds(instance);

	iounmap(instance->reg_set);

	pci_release_selected_regions(instance->pdev, instance->bar);
}

/**
 * megasas_get_seq_num -	Gets latest event sequence numbers
 * @instance:			Adapter soft state
 * @eli:			FW event log sequence numbers information
 *
 * FW maintains a log of all events in a non-volatile area. Upper layers would
 * usually find out the latest sequence number of the events, the seq number at
 * the boot etc. They would "read" all the events below the latest seq number
 * by issuing a direct fw cmd (DCMD).
For the future events (beyond latest seq * number), they would subsribe to AEN (asynchronous event notification) and * wait for the events to happen. */ static int megasas_get_seq_num(struct megasas_instance *instance, struct megasas_evt_log_info *eli) { struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; struct megasas_evt_log_info *el_info; dma_addr_t el_info_h = 0; cmd = megasas_get_cmd(instance); if (!cmd) { return -ENOMEM; } dcmd = &cmd->frame->dcmd; el_info = pci_alloc_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), &el_info_h); if (!el_info) { megasas_return_cmd(instance, cmd); return -ENOMEM; } memset(el_info, 0, sizeof(*el_info)); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; dcmd->sgl.sge32[0].phys_addr = el_info_h; dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info); megasas_issue_blocked_cmd(instance, cmd); /* * Copy the data back into callers buffer */ memcpy(eli, el_info, sizeof(struct megasas_evt_log_info)); pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), el_info, el_info_h); megasas_return_cmd(instance, cmd); return 0; } /** * megasas_register_aen - Registers for asynchronous event notification * @instance: Adapter soft state * @seq_num: The starting sequence number * @class_locale: Class of the event * * This function subscribes for AEN for events beyond the @seq_num. 
It requests * to be notified if and only if the event is of type @class_locale */ static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word) { int ret_val; struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; union megasas_evt_class_locale curr_aen; union megasas_evt_class_locale prev_aen; /* * If there an AEN pending already (aen_cmd), check if the * class_locale of that pending AEN is inclusive of the new * AEN request we currently have. If it is, then we don't have * to do anything. In other words, whichever events the current * AEN request is subscribing to, have already been subscribed * to. * * If the old_cmd is _not_ inclusive, then we have to abort * that command, form a class_locale that is superset of both * old and current and re-issue to the FW */ curr_aen.word = class_locale_word; if (instance->aen_cmd) { prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; /* * A class whose enum value is smaller is inclusive of all * higher values. If a PROGRESS (= -1) was previously * registered, then a new registration requests for higher * classes need not be sent to FW. They are automatically * included. * * Locale numbers don't have such hierarchy. They are bitmap * values */ if ((prev_aen.members.class <= curr_aen.members.class) && !((prev_aen.members.locale & curr_aen.members.locale) ^ curr_aen.members.locale)) { /* * Previously issued event registration includes * current request. Nothing to do. 
*/ return 0; } else { curr_aen.members.locale |= prev_aen.members.locale; if (prev_aen.members.class < curr_aen.members.class) curr_aen.members.class = prev_aen.members.class; instance->aen_cmd->abort_aen = 1; ret_val = megasas_issue_blocked_abort_cmd(instance, instance-> aen_cmd); if (ret_val) { printk(KERN_DEBUG "megasas: Failed to abort " "previous AEN command\n"); return ret_val; } } } cmd = megasas_get_cmd(instance); if (!cmd) return -ENOMEM; dcmd = &cmd->frame->dcmd; memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); /* * Prepare DCMD for aen registration */ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; instance->last_seq_num = seq_num; dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; dcmd->mbox.w[0] = seq_num; dcmd->mbox.w[1] = curr_aen.word; dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); if (instance->aen_cmd != NULL) { megasas_return_cmd(instance, cmd); return 0; } /* * Store reference to the cmd used to register for AEN. 
When an * application wants us to register for AEN, we have to abort this * cmd and re-register with a new EVENT LOCALE supplied by that app */ instance->aen_cmd = cmd; /* * Issue the aen registration frame */ instance->instancet->issue_dcmd(instance, cmd); return 0; } /** * megasas_start_aen - Subscribes to AEN during driver load time * @instance: Adapter soft state */ static int megasas_start_aen(struct megasas_instance *instance) { struct megasas_evt_log_info eli; union megasas_evt_class_locale class_locale; /* * Get the latest sequence number from FW */ memset(&eli, 0, sizeof(eli)); if (megasas_get_seq_num(instance, &eli)) return -1; /* * Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; return megasas_register_aen(instance, eli.newest_seq_num + 1, class_locale.word); } /** * megasas_io_attach - Attaches this driver to SCSI mid-layer * @instance: Adapter soft state */ static int megasas_io_attach(struct megasas_instance *instance) { struct Scsi_Host *host = instance->host; /* * Export parameters required by SCSI mid-layer */ host->irq = instance->pdev->irq; host->unique_id = instance->unique_id; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { host->can_queue = instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; } else host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS; host->this_id = instance->init_id; host->sg_tablesize = instance->max_num_sge; if (instance->fw_support_ieee) instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; /* * Check if the module parameter value for max_sectors can be used */ if (max_sectors && max_sectors < instance->max_sectors_per_req) instance->max_sectors_per_req = max_sectors; else { if (max_sectors) { if (((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || (instance->pdev->device == 
PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
				(max_sectors <= MEGASAS_MAX_SECTORS)) {
				instance->max_sectors_per_req = max_sectors;
			} else {
				/* Out-of-range module param: keep FW limit */
				printk(KERN_INFO "megasas: max_sectors should be > 0"
					"and <= %d (or < 1MB for GEN2 controller)\n",
					instance->max_sectors_per_req);
			}
		}
	}

	host->max_sectors = instance->max_sectors_per_req;
	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
	host->max_lun = MEGASAS_MAX_LUN;
	host->max_cmd_len = 16;

	/* Fusion only supports host reset */
	if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) {
		host->hostt->eh_device_reset_handler = NULL;
		host->hostt->eh_bus_reset_handler = NULL;
	}

	/*
	 * Notify the mid-layer about the new controller
	 */
	if (scsi_add_host(host, &instance->pdev->dev)) {
		printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
		return -ENODEV;
	}

	/*
	 * Trigger SCSI to scan our drives
	 */
	scsi_scan_host(host);

	return 0;
}

/*
 * megasas_set_dma_mask - Configure the DMA addressing for @pdev.
 *
 * Prefers a 64-bit DMA mask when the platform supports it, falling back
 * to 32-bit.  Returns 0 on success, 1 if no usable mask could be set.
 */
static int megasas_set_dma_mask(struct pci_dev *pdev)
{
	/*
	 * All our contollers are capable of performing 64-bit DMA
	 */
	if (IS_DMA64) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {

			if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
				goto fail_set_dma_mask;
		}
	} else {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
			goto fail_set_dma_mask;
	}
	return 0;

fail_set_dma_mask:
	return 1;
}

/**
 * megasas_probe_one -	PCI hotplug entry point
 * @pdev:		PCI device structure
 * @id:			PCI ids of supported hotplugged adapter
 */
static int __devinit
megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rval, pos;
	struct Scsi_Host *host;
	struct megasas_instance *instance;
	u16 control = 0;

	/* Reset MSI-X in the kdump kernel */
	if (reset_devices) {
		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
		if (pos) {
			pci_read_config_word(pdev, msi_control_reg(pos),
					     &control);
			if (control & PCI_MSIX_FLAGS_ENABLE) {
				dev_info(&pdev->dev, "resetting MSI-X\n");
				pci_write_config_word(pdev,
						      msi_control_reg(pos),
						      control &
~PCI_MSIX_FLAGS_ENABLE); } } } /* * Announce PCI information */ printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); printk("bus %d:slot %d:func %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); if (rval) { return rval; } pci_set_master(pdev); if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; host = scsi_host_alloc(&megasas_template, sizeof(struct megasas_instance)); if (!host) { printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n"); goto fail_alloc_instance; } instance = (struct megasas_instance *)host->hostdata; memset(instance, 0, sizeof(*instance)); atomic_set( &instance->fw_reset_no_pci_access, 0 ); instance->pdev = pdev; switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: { struct fusion_context *fusion; instance->ctrl_context = kzalloc(sizeof(struct fusion_context), GFP_KERNEL); if (!instance->ctrl_context) { printk(KERN_DEBUG "megasas: Failed to allocate " "memory for Fusion context info\n"); goto fail_alloc_dma_buf; } fusion = instance->ctrl_context; INIT_LIST_HEAD(&fusion->cmd_pool); spin_lock_init(&fusion->cmd_pool_lock); } break; default: /* For all other supported controllers */ instance->producer = pci_alloc_consistent(pdev, sizeof(u32), &instance->producer_h); instance->consumer = pci_alloc_consistent(pdev, sizeof(u32), &instance->consumer_h); if (!instance->producer || !instance->consumer) { printk(KERN_DEBUG "megasas: Failed to allocate" "memory for producer, consumer\n"); goto fail_alloc_dma_buf; } *instance->producer = 0; *instance->consumer = 0; break; } megasas_poll_wait_aen = 0; instance->flag_ieee = 0; instance->ev = NULL; instance->issuepend_done = 1; instance->adprecovery = MEGASAS_HBA_OPERATIONAL; megasas_poll_wait_aen = 0; instance->evt_detail = pci_alloc_consistent(pdev, sizeof(struct megasas_evt_detail), 
&instance->evt_detail_h); if (!instance->evt_detail) { printk(KERN_DEBUG "megasas: Failed to allocate memory for " "event detail structure\n"); goto fail_alloc_dma_buf; } /* * Initialize locks and queues */ INIT_LIST_HEAD(&instance->cmd_pool); INIT_LIST_HEAD(&instance->internal_reset_pending_q); atomic_set(&instance->fw_outstanding,0); init_waitqueue_head(&instance->int_cmd_wait_q); init_waitqueue_head(&instance->abort_cmd_wait_q); spin_lock_init(&instance->cmd_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); spin_lock_init(&poll_aen_lock); mutex_init(&instance->aen_mutex); mutex_init(&instance->reset_mutex); /* * Initialize PCI related and misc parameters */ instance->host = host; instance->unique_id = pdev->bus->number << 8 | pdev->devfn; instance->init_id = MEGASAS_DEFAULT_INIT_ID; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { instance->flag_ieee = 1; sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); } else sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); megasas_dbg_lvl = 0; instance->flag = 0; instance->unload = 1; instance->last_time = 0; instance->disableOnlineCtrlReset = 1; if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); else INIT_WORK(&instance->work_init, process_fw_state_change_wq); /* * Initialize MFI Firmware */ if (megasas_init_fw(instance)) goto fail_init_mfi; /* * Register IRQ */ if (request_irq(instance->msi_flag ? 
instance->msixentry.vector : pdev->irq, instance->instancet->service_isr, IRQF_SHARED, "megasas", instance)) { printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); goto fail_irq; } instance->instancet->enable_intr(instance->reg_set); /* * Store instance in PCI softstate */ pci_set_drvdata(pdev, instance); /* * Add this controller to megasas_mgmt_info structure so that it * can be exported to management applications */ megasas_mgmt_info.count++; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; megasas_mgmt_info.max_index++; /* * Register with SCSI mid-layer */ if (megasas_io_attach(instance)) goto fail_io_attach; instance->unload = 0; /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) { printk(KERN_DEBUG "megasas: start aen failed\n"); goto fail_start_aen; } return 0; fail_start_aen: fail_io_attach: megasas_mgmt_info.count--; megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; megasas_mgmt_info.max_index--; pci_set_drvdata(pdev, NULL); instance->instancet->disable_intr(instance->reg_set); free_irq(instance->msi_flag ? 
instance->msixentry.vector : instance->pdev->irq, instance);

      fail_irq:
	/* Unwind probe resources in reverse order of acquisition */
	if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
		megasas_release_fusion(instance);
	else
		megasas_release_mfi(instance);
      fail_init_mfi:
	if (instance->msi_flag)
		pci_disable_msix(instance->pdev);
      fail_alloc_dma_buf:
	if (instance->evt_detail)
		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
				    instance->evt_detail,
				    instance->evt_detail_h);

	if (instance->producer)
		pci_free_consistent(pdev, sizeof(u32), instance->producer,
				    instance->producer_h);
	if (instance->consumer)
		pci_free_consistent(pdev, sizeof(u32), instance->consumer,
				    instance->consumer_h);
	scsi_host_put(host);

      fail_alloc_instance:
      fail_set_dma_mask:
	pci_disable_device(pdev);

	return -ENODEV;
}

/**
 * megasas_flush_cache -	Requests FW to flush all its caches
 * @instance:			Adapter soft state
 *
 * Issues a blocking MR_DCMD_CTRL_CACHE_FLUSH DCMD.  Silently returns if the
 * adapter is in a critical error state or no command frame is available.
 */
static void megasas_flush_cache(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
		return;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return;

	dcmd = &cmd->frame->dcmd;

	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 0;
	dcmd->flags = MFI_FRAME_DIR_NONE;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = 0;
	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

	megasas_issue_blocked_cmd(instance, cmd);

	megasas_return_cmd(instance, cmd);

	return;
}

/**
 * megasas_shutdown_controller -	Instructs FW to shutdown the controller
 * @instance:				Adapter soft state
 * @opcode:				Shutdown/Hibernate
 *
 * Aborts any outstanding AEN / map-update commands, then synchronously
 * issues the shutdown (or hibernate) DCMD given by @opcode.
 */
static void megasas_shutdown_controller(struct megasas_instance *instance,
					u32 opcode)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
		return;

	cmd = megasas_get_cmd(instance);

	if (!cmd)
		return;

	if (instance->aen_cmd)
		megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
	if
(instance->map_update_cmd) megasas_issue_blocked_abort_cmd(instance, instance->map_update_cmd); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = opcode; megasas_issue_blocked_cmd(instance, cmd); megasas_return_cmd(instance, cmd); return; } #ifdef CONFIG_PM /** * megasas_suspend - driver suspend entry point * @pdev: PCI device structure * @state: PCI power state to suspend routine */ static int megasas_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host; struct megasas_instance *instance; instance = pci_get_drvdata(pdev); host = instance->host; instance->unload = 1; if (poll_mode_io) del_timer_sync(&instance->io_completion_timer); megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); /* cancel the delayed work if this work still in queue */ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; cancel_delayed_work_sync( (struct delayed_work *)&ev->hotplug_work); instance->ev = NULL; } tasklet_kill(&instance->isr_tasklet); pci_set_drvdata(instance->pdev, instance); instance->instancet->disable_intr(instance->reg_set); free_irq(instance->msi_flag ? 
instance->msixentry.vector : instance->pdev->irq, instance); if (instance->msi_flag) pci_disable_msix(instance->pdev); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } /** * megasas_resume- driver resume entry point * @pdev: PCI device structure */ static int megasas_resume(struct pci_dev *pdev) { int rval; struct Scsi_Host *host; struct megasas_instance *instance; instance = pci_get_drvdata(pdev); host = instance->host; pci_set_power_state(pdev, PCI_D0); pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); /* * PCI prepping: enable device set bus mastering and dma mask */ rval = pci_enable_device_mem(pdev); if (rval) { printk(KERN_ERR "megasas: Enable device failed\n"); return rval; } pci_set_master(pdev); if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; /* * Initialize MFI Firmware */ atomic_set(&instance->fw_outstanding, 0); /* * We expect the FW state to be READY */ if (megasas_transition_to_ready(instance)) goto fail_ready_state; /* Now re-enable MSI-X */ if (instance->msi_flag) pci_enable_msix(instance->pdev, &instance->msixentry, 1); switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: { megasas_reset_reply_desc(instance); if (megasas_ioc_init_fusion(instance)) { megasas_free_cmds(instance); megasas_free_cmds_fusion(instance); goto fail_init_mfi; } if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); } break; default: *instance->producer = 0; *instance->consumer = 0; if (megasas_issue_init_mfi(instance)) goto fail_init_mfi; break; } tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); /* * Register IRQ */ if (request_irq(instance->msi_flag ? 
instance->msixentry.vector : pdev->irq, instance->instancet->service_isr, IRQF_SHARED, "megasas", instance)) { printk(KERN_ERR "megasas: Failed to register IRQ\n"); goto fail_irq; } instance->instancet->enable_intr(instance->reg_set); /* Initialize the cmd completion timer */ if (poll_mode_io) megasas_start_timer(instance, &instance->io_completion_timer, megasas_io_completion_timer, MEGASAS_COMPLETION_TIMER_INTERVAL); instance->unload = 0; /* * Initiate AEN (Asynchronous Event Notification) */ if (megasas_start_aen(instance)) printk(KERN_ERR "megasas: Start AEN failed\n"); return 0; fail_irq: fail_init_mfi: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); if (instance->consumer) pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); scsi_host_put(host); fail_set_dma_mask: fail_ready_state: pci_disable_device(pdev); return -ENODEV; } #else #define megasas_suspend NULL #define megasas_resume NULL #endif /** * megasas_detach_one - PCI hot"un"plug entry point * @pdev: PCI device structure */ static void __devexit megasas_detach_one(struct pci_dev *pdev) { int i; struct Scsi_Host *host; struct megasas_instance *instance; struct fusion_context *fusion; instance = pci_get_drvdata(pdev); instance->unload = 1; host = instance->host; fusion = instance->ctrl_context; if (poll_mode_io) del_timer_sync(&instance->io_completion_timer); scsi_remove_host(instance->host); megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); /* cancel the delayed work if this work still in queue*/ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; cancel_delayed_work_sync( (struct delayed_work *)&ev->hotplug_work); instance->ev = NULL; } tasklet_kill(&instance->isr_tasklet); /* * Take the instance off the instance array. 
Note that we will not * decrement the max_index. We let this array be sparse array */ for (i = 0; i < megasas_mgmt_info.max_index; i++) { if (megasas_mgmt_info.instance[i] == instance) { megasas_mgmt_info.count--; megasas_mgmt_info.instance[i] = NULL; break; } } pci_set_drvdata(instance->pdev, NULL); instance->instancet->disable_intr(instance->reg_set); free_irq(instance->msi_flag ? instance->msixentry.vector : instance->pdev->irq, instance); if (instance->msi_flag) pci_disable_msix(instance->pdev); switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: megasas_release_fusion(instance); for (i = 0; i < 2 ; i++) if (fusion->ld_map[i]) dma_free_coherent(&instance->pdev->dev, fusion->map_sz, fusion->ld_map[i], fusion-> ld_map_phys[i]); kfree(instance->ctrl_context); break; default: megasas_release_mfi(instance); pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); break; } scsi_host_put(host); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return; } /** * megasas_shutdown - Shutdown entry point * @device: Generic device structure */ static void megasas_shutdown(struct pci_dev *pdev) { struct megasas_instance *instance = pci_get_drvdata(pdev); instance->unload = 1; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); instance->instancet->disable_intr(instance->reg_set); free_irq(instance->msi_flag ? 
instance->msixentry.vector : instance->pdev->irq, instance);
	if (instance->msi_flag)
		pci_disable_msix(instance->pdev);
}

/**
 * megasas_mgmt_open -	char node "open" entry point
 *
 * Only privileged (CAP_SYS_ADMIN) users may open the management node.
 */
static int megasas_mgmt_open(struct inode *inode, struct file *filep)
{
	/*
	 * Allow only those users with admin rights
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	return 0;
}

/**
 * megasas_mgmt_fasync -	Async notifier registration from applications
 *
 * This function adds the calling process to a driver global queue. When an
 * event occurs, SIGIO will be sent to all processes in this queue.
 */
static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
{
	int rc;

	mutex_lock(&megasas_async_queue_mutex);

	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);

	mutex_unlock(&megasas_async_queue_mutex);

	if (rc >= 0) {
		/* For sanity check when we get ioctl */
		filep->private_data = filep;
		return 0;
	}

	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);

	return rc;
}

/**
 * megasas_mgmt_poll -  char node "poll" entry point
 *
 * Reports the node readable when an AEN has arrived since the last poll
 * (megasas_poll_wait_aen, protected by poll_aen_lock).
 */
static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
{
	unsigned int mask;
	unsigned long flags;
	poll_wait(file, &megasas_poll_wait, wait);
	spin_lock_irqsave(&poll_aen_lock, flags);
	if (megasas_poll_wait_aen)
		mask = (POLLIN | POLLRDNORM);
	else
		mask = 0;
	spin_unlock_irqrestore(&poll_aen_lock, flags);
	return mask;
}

/**
 * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
 * @instance:			Adapter soft state
 * @argp:			User's ioctl packet
 */
static int
megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
		      struct megasas_iocpacket __user * user_ioc,
		      struct megasas_iocpacket *ioc)
{
	struct megasas_sge32 *kern_sge32;
	struct megasas_cmd *cmd;
	void *kbuff_arr[MAX_IOCTL_SGE];
	dma_addr_t buf_handle = 0;
	int error = 0, i;
	void *sense = NULL;
	dma_addr_t sense_handle;
	unsigned long *sense_ptr;

	memset(kbuff_arr, 0, sizeof(kbuff_arr));

	/* Reject user packets with more SGEs than we can mirror */
	if (ioc->sge_count > MAX_IOCTL_SGE) {
		printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
ioc->sge_count, MAX_IOCTL_SGE); return -EINVAL; } cmd = megasas_get_cmd(instance); if (!cmd) { printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n"); return -ENOMEM; } /* * User's IOCTL packet has 2 frames (maximum). Copy those two * frames into our cmd's frames. cmd->frame's context will get * overwritten when we copy from user's frames. So set that value * alone separately */ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); cmd->frame->hdr.context = cmd->index; cmd->frame->hdr.pad_0 = 0; /* * The management interface between applications and the fw uses * MFI frames. E.g, RAID configuration changes, LD property changes * etc are accomplishes through different kinds of MFI frames. The * driver needs to care only about substituting user buffers with * kernel buffers in SGLs. The location of SGL is embedded in the * struct iocpacket itself. */ kern_sge32 = (struct megasas_sge32 *) ((unsigned long)cmd->frame + ioc->sgl_off); /* * For each user buffer, create a mirror buffer and copy in */ for (i = 0; i < ioc->sge_count; i++) { if (!ioc->sgl[i].iov_len) continue; kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, ioc->sgl[i].iov_len, &buf_handle, GFP_KERNEL); if (!kbuff_arr[i]) { printk(KERN_DEBUG "megasas: Failed to alloc " "kernel SGL buffer for IOCTL \n"); error = -ENOMEM; goto out; } /* * We don't change the dma_coherent_mask, so * pci_alloc_consistent only returns 32bit addresses */ kern_sge32[i].phys_addr = (u32) buf_handle; kern_sge32[i].length = ioc->sgl[i].iov_len; /* * We created a kernel buffer corresponding to the * user buffer. 
Now copy in from the user buffer */ if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, (u32) (ioc->sgl[i].iov_len))) { error = -EFAULT; goto out; } } if (ioc->sense_len) { sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len, &sense_handle, GFP_KERNEL); if (!sense) { error = -ENOMEM; goto out; } sense_ptr = (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); *sense_ptr = sense_handle; } /* * Set the sync_cmd flag so that the ISR knows not to complete this * cmd to the SCSI mid-layer */ cmd->sync_cmd = 1; megasas_issue_blocked_cmd(instance, cmd); cmd->sync_cmd = 0; /* * copy out the kernel buffers to user buffers */ for (i = 0; i < ioc->sge_count; i++) { if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], ioc->sgl[i].iov_len)) { error = -EFAULT; goto out; } } /* * copy out the sense */ if (ioc->sense_len) { /* * sense_ptr points to the location that has the user * sense buffer address */ sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + ioc->sense_off); if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), sense, ioc->sense_len)) { printk(KERN_ERR "megasas: Failed to copy out to user " "sense data\n"); error = -EFAULT; goto out; } } /* * copy the status codes returned by the fw */ if (copy_to_user(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status, sizeof(u8))) { printk(KERN_DEBUG "megasas: Error copying out cmd_status\n"); error = -EFAULT; } out: if (sense) { dma_free_coherent(&instance->pdev->dev, ioc->sense_len, sense, sense_handle); } for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) { dma_free_coherent(&instance->pdev->dev, kern_sge32[i].length, kbuff_arr[i], kern_sge32[i].phys_addr); } megasas_return_cmd(instance, cmd); return error; } static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) { struct megasas_iocpacket __user *user_ioc = (struct megasas_iocpacket __user *)arg; struct megasas_iocpacket *ioc; struct megasas_instance *instance; int error; int i; unsigned long flags; u32 
wait_time = MEGASAS_RESET_WAIT_TIME; ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) return -ENOMEM; if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) { error = -EFAULT; goto out_kfree_ioc; } instance = megasas_lookup_instance(ioc->host_no); if (!instance) { error = -ENODEV; goto out_kfree_ioc; } if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { printk(KERN_ERR "Controller in crit error\n"); error = -ENODEV; goto out_kfree_ioc; } if (instance->unload == 1) { error = -ENODEV; goto out_kfree_ioc; } /* * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds */ if (down_interruptible(&instance->ioctl_sem)) { error = -ERESTARTSYS; goto out_kfree_ioc; } for (i = 0; i < wait_time; i++) { spin_lock_irqsave(&instance->hba_lock, flags); if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); break; } spin_unlock_irqrestore(&instance->hba_lock, flags); if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { printk(KERN_NOTICE "megasas: waiting" "for controller reset to finish\n"); } msleep(1000); } spin_lock_irqsave(&instance->hba_lock, flags); if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); printk(KERN_ERR "megaraid_sas: timed out while" "waiting for HBA to recover\n"); error = -ENODEV; goto out_kfree_ioc; } spin_unlock_irqrestore(&instance->hba_lock, flags); error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); up(&instance->ioctl_sem); out_kfree_ioc: kfree(ioc); return error; } static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) { struct megasas_instance *instance; struct megasas_aen aen; int error; int i; unsigned long flags; u32 wait_time = MEGASAS_RESET_WAIT_TIME; if (file->private_data != file) { printk(KERN_DEBUG "megasas: fasync_helper was not " "called first\n"); return -EINVAL; } if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) return -EFAULT; instance = megasas_lookup_instance(aen.host_no); if (!instance) return 
-ENODEV; if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { return -ENODEV; } if (instance->unload == 1) { return -ENODEV; } for (i = 0; i < wait_time; i++) { spin_lock_irqsave(&instance->hba_lock, flags); if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); break; } spin_unlock_irqrestore(&instance->hba_lock, flags); if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { printk(KERN_NOTICE "megasas: waiting for" "controller reset to finish\n"); } msleep(1000); } spin_lock_irqsave(&instance->hba_lock, flags); if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); printk(KERN_ERR "megaraid_sas: timed out while waiting" "for HBA to recover.\n"); return -ENODEV; } spin_unlock_irqrestore(&instance->hba_lock, flags); mutex_lock(&instance->aen_mutex); error = megasas_register_aen(instance, aen.seq_num, aen.class_locale_word); mutex_unlock(&instance->aen_mutex); return error; } /** * megasas_mgmt_ioctl - char node ioctl entry point */ static long megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case MEGASAS_IOC_FIRMWARE: return megasas_mgmt_ioctl_fw(file, arg); case MEGASAS_IOC_GET_AEN: return megasas_mgmt_ioctl_aen(file, arg); } return -ENOTTY; } #ifdef CONFIG_COMPAT static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) { struct compat_megasas_iocpacket __user *cioc = (struct compat_megasas_iocpacket __user *)arg; struct megasas_iocpacket __user *ioc = compat_alloc_user_space(sizeof(struct megasas_iocpacket)); int i; int error = 0; compat_uptr_t ptr; if (clear_user(ioc, sizeof(*ioc))) return -EFAULT; if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || 
copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) return -EFAULT; /* * The sense_ptr is used in megasas_mgmt_fw_ioctl only when * sense_len is not null, so prepare the 64bit value under * the same condition. */ if (ioc->sense_len) { void __user **sense_ioc_ptr = (void __user **)(ioc->frame.raw + ioc->sense_off); compat_uptr_t *sense_cioc_ptr = (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off); if (get_user(ptr, sense_cioc_ptr) || put_user(compat_ptr(ptr), sense_ioc_ptr)) return -EFAULT; } for (i = 0; i < MAX_IOCTL_SGE; i++) { if (get_user(ptr, &cioc->sgl[i].iov_base) || put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || copy_in_user(&ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len, sizeof(compat_size_t))) return -EFAULT; } error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); if (copy_in_user(&cioc->frame.hdr.cmd_status, &ioc->frame.hdr.cmd_status, sizeof(u8))) { printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); return -EFAULT; } return error; } static long megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case MEGASAS_IOC_FIRMWARE32: return megasas_mgmt_compat_ioctl_fw(file, arg); case MEGASAS_IOC_GET_AEN: return megasas_mgmt_ioctl_aen(file, arg); } return -ENOTTY; } #endif /* * File operations structure for management interface */ static const struct file_operations megasas_mgmt_fops = { .owner = THIS_MODULE, .open = megasas_mgmt_open, .fasync = megasas_mgmt_fasync, .unlocked_ioctl = megasas_mgmt_ioctl, .poll = megasas_mgmt_poll, #ifdef CONFIG_COMPAT .compat_ioctl = megasas_mgmt_compat_ioctl, #endif .llseek = noop_llseek, }; /* * PCI hotplug support registration structure */ static struct pci_driver megasas_pci_driver = { .name = "megaraid_sas", .id_table = megasas_pci_table, .probe = megasas_probe_one, .remove = __devexit_p(megasas_detach_one), .suspend = megasas_suspend, .resume = megasas_resume, .shutdown = megasas_shutdown, }; /* * Sysfs driver attributes */ static ssize_t 
megasas_sysfs_show_version(struct device_driver *dd, char *buf)
{
	/* Driver version string, newline-terminated */
	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
			MEGASAS_VERSION);
}

static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);

static ssize_t
megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
{
	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
			MEGASAS_RELDATE);
}

static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
		   NULL);

static ssize_t
megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_poll_for_event);
}

static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
			megasas_sysfs_show_support_poll_for_event, NULL);

static ssize_t
megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", support_device_change);
}

static DRIVER_ATTR(support_device_change, S_IRUGO,
			megasas_sysfs_show_support_device_change, NULL);

static ssize_t
megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", megasas_dbg_lvl);
}

static ssize_t
megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf,
			  size_t count)
{
	int retval = count;
	/* NOTE(review): writes the global megasas_dbg_lvl unlocked */
	if(sscanf(buf,"%u",&megasas_dbg_lvl)<1){
		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
		retval = -EINVAL;
	}
	return retval;
}

static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
		   megasas_sysfs_set_dbg_lvl);

static ssize_t
megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%u\n", poll_mode_io);
}

static ssize_t
megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
			       const char *buf, size_t count)
{
	int retval = count;
	int tmp = poll_mode_io;
	int i;
	struct megasas_instance *instance;

	if (sscanf(buf, "%u", &poll_mode_io) < 1) {
		printk(KERN_ERR "megasas: could not set poll_mode_io\n");
		retval = -EINVAL;
	}

	/*
	 * Check if poll_mode_io is already set or is same as previous value
	 */
	if ((tmp && poll_mode_io) || (tmp ==
poll_mode_io)) goto out; if (poll_mode_io) { /* * Start timers for all adapters */ for (i = 0; i < megasas_mgmt_info.max_index; i++) { instance = megasas_mgmt_info.instance[i]; if (instance) { megasas_start_timer(instance, &instance->io_completion_timer, megasas_io_completion_timer, MEGASAS_COMPLETION_TIMER_INTERVAL); } } } else { /* * Delete timers for all adapters */ for (i = 0; i < megasas_mgmt_info.max_index; i++) { instance = megasas_mgmt_info.instance[i]; if (instance) del_timer_sync(&instance->io_completion_timer); } } out: return retval; } static void megasas_aen_polling(struct work_struct *work) { struct megasas_aen_event *ev = container_of(work, struct megasas_aen_event, hotplug_work); struct megasas_instance *instance = ev->instance; union megasas_evt_class_locale class_locale; struct Scsi_Host *host; struct scsi_device *sdev1; u16 pd_index = 0; u16 ld_index = 0; int i, j, doscan = 0; u32 seq_num; int error; if (!instance) { printk(KERN_ERR "invalid instance!\n"); kfree(ev); return; } instance->ev = NULL; host = instance->host; if (instance->evt_detail) { switch (instance->evt_detail->code) { case MR_EVT_PD_INSERTED: if (megasas_get_pd_list(instance) == 0) { for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { pd_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, i, j, 0); if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { if (!sdev1) { scsi_add_device(host, i, j, 0); } if (sdev1) scsi_device_put(sdev1); } } } } doscan = 0; break; case MR_EVT_PD_REMOVED: if (megasas_get_pd_list(instance) == 0) { megasas_get_pd_list(instance); for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { pd_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, i, j, 0); if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { if (sdev1) { scsi_device_put(sdev1); } } else { if (sdev1) { 
scsi_remove_device(sdev1); scsi_device_put(sdev1); } } } } } doscan = 0; break; case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, i + MEGASAS_MAX_LD_CHANNELS, j, 0); if (instance->ld_ids[ld_index] != 0xff) { if (sdev1) { scsi_device_put(sdev1); } } else { if (sdev1) { scsi_remove_device(sdev1); scsi_device_put(sdev1); } } } } doscan = 0; break; case MR_EVT_LD_CREATED: megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, i+MEGASAS_MAX_LD_CHANNELS, j, 0); if (instance->ld_ids[ld_index] != 0xff) { if (!sdev1) { scsi_add_device(host, i + 2, j, 0); } } if (sdev1) { scsi_device_put(sdev1); } } } doscan = 0; break; case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: case MR_EVT_LD_STATE_CHANGE: doscan = 1; break; default: doscan = 0; break; } } else { printk(KERN_ERR "invalid evt_detail!\n"); kfree(ev); return; } if (doscan) { printk(KERN_INFO "scanning ...\n"); megasas_get_pd_list(instance); for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; sdev1 = scsi_device_lookup(host, i, j, 0); if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { if (!sdev1) { scsi_add_device(host, i, j, 0); } if (sdev1) scsi_device_put(sdev1); } else { if (sdev1) { scsi_remove_device(sdev1); scsi_device_put(sdev1); } } } } megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; sdev1 = scsi_device_lookup(host, i+MEGASAS_MAX_LD_CHANNELS, j, 0); if 
(instance->ld_ids[ld_index] != 0xff) { if (!sdev1) { scsi_add_device(host, i+2, j, 0); } else { scsi_device_put(sdev1); } } else { if (sdev1) { scsi_remove_device(sdev1); scsi_device_put(sdev1); } } } } } if ( instance->aen_cmd != NULL ) { kfree(ev); return ; } seq_num = instance->evt_detail->seq_num + 1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; mutex_lock(&instance->aen_mutex); error = megasas_register_aen(instance, seq_num, class_locale.word); mutex_unlock(&instance->aen_mutex); if (error) printk(KERN_ERR "register aen failed error %x\n", error); kfree(ev); } static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR, megasas_sysfs_show_poll_mode_io, megasas_sysfs_set_poll_mode_io); /** * megasas_init - Driver load entry point */ static int __init megasas_init(void) { int rval; /* * Announce driver version and other information */ printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, MEGASAS_EXT_VERSION); support_poll_for_event = 2; support_device_change = 1; memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); /* * Register character device node */ rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); if (rval < 0) { printk(KERN_DEBUG "megasas: failed to open device node\n"); return rval; } megasas_mgmt_majorno = rval; /* * Register ourselves as PCI hotplug module */ rval = pci_register_driver(&megasas_pci_driver); if (rval) { printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); goto err_pcidrv; } rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_version); if (rval) goto err_dcf_attr_ver; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_release_date); if (rval) goto err_dcf_rel_date; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); if (rval) goto err_dcf_support_poll_for_event; rval = 
driver_create_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); if (rval) goto err_dcf_dbg_lvl; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_poll_mode_io); if (rval) goto err_dcf_poll_mode_io; rval = driver_create_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); if (rval) goto err_dcf_support_device_change; return rval; err_dcf_support_device_change: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_poll_mode_io); err_dcf_poll_mode_io: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); err_dcf_dbg_lvl: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); err_dcf_support_poll_for_event: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_release_date); err_dcf_rel_date: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); err_dcf_attr_ver: pci_unregister_driver(&megasas_pci_driver); err_pcidrv: unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); return rval; } /** * megasas_exit - Driver unload entry point */ static void __exit megasas_exit(void) { driver_remove_file(&megasas_pci_driver.driver, &driver_attr_poll_mode_io); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_poll_for_event); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_support_device_change); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_release_date); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); pci_unregister_driver(&megasas_pci_driver); unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); } module_init(megasas_init); module_exit(megasas_exit);
gpl-2.0
PRJosh/kernel_lge_mako
drivers/staging/vt6656/main_usb.c
1118
59914
/* * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * File: main_usb.c * * Purpose: driver entry for initial, open, close, tx and rx. * * Author: Lyndon Chen * * Date: Dec 8, 2005 * * Functions: * * vt6656_probe - module initial (insmod) driver entry * device_remove1 - module remove entry * device_open - allocate dma/descripter resource & initial mac/bbp function * device_xmit - asynchrous data tx function * device_set_multi - set mac filter * device_ioctl - ioctl entry * device_close - shutdown mac/bbp & free dma/descripter resource * device_alloc_frag_buf - rx fragement pre-allocated function * device_free_tx_bufs - free tx buffer function * device_dma0_tx_80211- tx 802.11 frame via dma0 * device_dma0_xmit- tx PS bufferred frame via dma0 * device_init_registers- initial MAC & BBP & RF internal registers. * device_init_rings- initial tx/rx ring buffer * device_init_defrag_cb- initial & allocate de-fragement buffer. 
* device_tx_srv- tx interrupt service function * * Revision History: */ #undef __NO_VERSION__ #include "device.h" #include "card.h" #include "baseband.h" #include "mac.h" #include "tether.h" #include "wmgr.h" #include "wctl.h" #include "power.h" #include "wcmd.h" #include "iocmd.h" #include "tcrc.h" #include "rxtx.h" #include "bssdb.h" #include "hostap.h" #include "wpactl.h" #include "ioctl.h" #include "iwctl.h" #include "dpc.h" #include "datarate.h" #include "rf.h" #include "firmware.h" #include "rndis.h" #include "control.h" #include "channel.h" #include "int.h" #include "iowpa.h" /*--------------------- Static Definitions -------------------------*/ //static int msglevel =MSG_LEVEL_DEBUG; static int msglevel =MSG_LEVEL_INFO; // // Define module options // // Version Information #define DRIVER_AUTHOR "VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DEVICE_FULL_DRV_NAM); #define DEVICE_PARAM(N,D) \ static int N[MAX_UINTS]=OPTION_DEFAULT;\ module_param_array(N, int, NULL, 0);\ MODULE_PARM_DESC(N, D); #define RX_DESC_MIN0 16 #define RX_DESC_MAX0 128 #define RX_DESC_DEF0 64 DEVICE_PARAM(RxDescriptors0,"Number of receive usb desc buffer"); #define TX_DESC_MIN0 16 #define TX_DESC_MAX0 128 #define TX_DESC_DEF0 64 DEVICE_PARAM(TxDescriptors0,"Number of transmit usb desc buffer"); #define CHANNEL_MIN 1 #define CHANNEL_MAX 14 #define CHANNEL_DEF 6 DEVICE_PARAM(Channel, "Channel number"); /* PreambleType[] is the preamble length used for transmit. 
0: indicate allows long preamble type 1: indicate allows short preamble type */ #define PREAMBLE_TYPE_DEF 1 DEVICE_PARAM(PreambleType, "Preamble Type"); #define RTS_THRESH_MIN 512 #define RTS_THRESH_MAX 2347 #define RTS_THRESH_DEF 2347 DEVICE_PARAM(RTSThreshold, "RTS threshold"); #define FRAG_THRESH_MIN 256 #define FRAG_THRESH_MAX 2346 #define FRAG_THRESH_DEF 2346 DEVICE_PARAM(FragThreshold, "Fragmentation threshold"); #define DATA_RATE_MIN 0 #define DATA_RATE_MAX 13 #define DATA_RATE_DEF 13 /* datarate[] index 0: indicate 1 Mbps 0x02 1: indicate 2 Mbps 0x04 2: indicate 5.5 Mbps 0x0B 3: indicate 11 Mbps 0x16 4: indicate 6 Mbps 0x0c 5: indicate 9 Mbps 0x12 6: indicate 12 Mbps 0x18 7: indicate 18 Mbps 0x24 8: indicate 24 Mbps 0x30 9: indicate 36 Mbps 0x48 10: indicate 48 Mbps 0x60 11: indicate 54 Mbps 0x6c 12: indicate 72 Mbps 0x90 13: indicate auto rate */ DEVICE_PARAM(ConnectionRate, "Connection data rate"); #define OP_MODE_MAX 2 #define OP_MODE_DEF 0 #define OP_MODE_MIN 0 DEVICE_PARAM(OPMode, "Infrastruct, adhoc, AP mode "); /* OpMode[] is used for transmit. 
0: indicate infrastruct mode used 1: indicate adhoc mode used 2: indicate AP mode used */ /* PSMode[] 0: indicate disable power saving mode 1: indicate enable power saving mode */ #define PS_MODE_DEF 0 DEVICE_PARAM(PSMode, "Power saving mode"); #define SHORT_RETRY_MIN 0 #define SHORT_RETRY_MAX 31 #define SHORT_RETRY_DEF 8 DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits"); #define LONG_RETRY_MIN 0 #define LONG_RETRY_MAX 15 #define LONG_RETRY_DEF 4 DEVICE_PARAM(LongRetryLimit, "long frame retry limits"); /* BasebandType[] baseband type selected 0: indicate 802.11a type 1: indicate 802.11b type 2: indicate 802.11g type */ #define BBP_TYPE_MIN 0 #define BBP_TYPE_MAX 2 #define BBP_TYPE_DEF 2 DEVICE_PARAM(BasebandType, "baseband type"); /* 80211hEnable[] 0: indicate disable 802.11h 1: indicate enable 802.11h */ #define X80211h_MODE_DEF 0 DEVICE_PARAM(b80211hEnable, "802.11h mode"); // // Static vars definitions // static struct usb_device_id vt6656_table[] = { {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)}, {} }; // Frequency list (map channels to frequencies) /* static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484, 4915, 4920, 4925, 4935, 4940, 4945, 4960, 4980, 5035, 5040, 5045, 5055, 5060, 5080, 5170, 5180, 5190, 5200, 5210, 5220, 5230, 5240, 5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680, 5700, 5745, 5765, 5785, 5805, 5825 }; #ifndef IW_ENCODE_NOKEY #define IW_ENCODE_NOKEY 0x0800 #define IW_ENCODE_MODE (IW_ENCODE_DISABLED | IW_ENCODE_RESTRICTED | IW_ENCODE_OPEN) #endif static const struct iw_handler_def iwctl_handler_def; */ /*--------------------- Static Functions --------------------------*/ static int vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id); static void vt6656_disconnect(struct usb_interface *intf); #ifdef CONFIG_PM /* Minimal support for suspend and resume */ static int vt6656_suspend(struct usb_interface *intf, 
pm_message_t message); static int vt6656_resume(struct usb_interface *intf); #endif /* CONFIG_PM */ static struct net_device_stats *device_get_stats(struct net_device *dev); static int device_open(struct net_device *dev); static int device_xmit(struct sk_buff *skb, struct net_device *dev); static void device_set_multi(struct net_device *dev); static int device_close(struct net_device *dev); static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType); static BOOL device_init_defrag_cb(PSDevice pDevice); static void device_init_diversity_timer(PSDevice pDevice); static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev); static int ethtool_ioctl(struct net_device *dev, void *useraddr); static void device_free_tx_bufs(PSDevice pDevice); static void device_free_rx_bufs(PSDevice pDevice); static void device_free_int_bufs(PSDevice pDevice); static void device_free_frag_bufs(PSDevice pDevice); static BOOL device_alloc_bufs(PSDevice pDevice); static int Read_config_file(PSDevice pDevice); static unsigned char *Config_FileOperation(PSDevice pDevice); static int Config_FileGetParameter(unsigned char *string, unsigned char *dest, unsigned char *source); static BOOL device_release_WPADEV(PSDevice pDevice); static void usb_device_reset(PSDevice pDevice); /*--------------------- Export Variables --------------------------*/ /*--------------------- Export Functions --------------------------*/ static void device_set_options(PSDevice pDevice) { BYTE abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; BYTE abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00}; u8 abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8}; memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN); memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN); memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN); pDevice->cbTD = 
TX_DESC_DEF0; pDevice->cbRD = RX_DESC_DEF0; pDevice->uChannel = CHANNEL_DEF; pDevice->wRTSThreshold = RTS_THRESH_DEF; pDevice->wFragmentationThreshold = FRAG_THRESH_DEF; pDevice->byShortRetryLimit = SHORT_RETRY_DEF; pDevice->byLongRetryLimit = LONG_RETRY_DEF; pDevice->wMaxTransmitMSDULifetime = DEFAULT_MSDU_LIFETIME; pDevice->byShortPreamble = PREAMBLE_TYPE_DEF; pDevice->ePSMode = PS_MODE_DEF; pDevice->b11hEnable = X80211h_MODE_DEF; pDevice->eOPMode = OP_MODE_DEF; pDevice->uConnectionRate = DATA_RATE_DEF; if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = TRUE; pDevice->byBBType = BBP_TYPE_DEF; pDevice->byPacketType = pDevice->byBBType; pDevice->byAutoFBCtrl = AUTO_FB_0; pDevice->bUpdateBBVGA = TRUE; pDevice->byFOETuning = 0; pDevice->byAutoPwrTunning = 0; pDevice->wCTSDuration = 0; pDevice->byPreambleType = 0; pDevice->bExistSWNetAddr = FALSE; // pDevice->bDiversityRegCtlON = TRUE; pDevice->bDiversityRegCtlON = FALSE; } static void device_init_diversity_timer(PSDevice pDevice) { init_timer(&pDevice->TimerSQ3Tmax1); pDevice->TimerSQ3Tmax1.data = (unsigned long)pDevice; pDevice->TimerSQ3Tmax1.function = (TimerFunction)TimerSQ3CallBack; pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ); init_timer(&pDevice->TimerSQ3Tmax2); pDevice->TimerSQ3Tmax2.data = (unsigned long)pDevice; pDevice->TimerSQ3Tmax2.function = (TimerFunction)TimerSQ3CallBack; pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ); init_timer(&pDevice->TimerSQ3Tmax3); pDevice->TimerSQ3Tmax3.data = (unsigned long)pDevice; pDevice->TimerSQ3Tmax3.function = (TimerFunction)TimerSQ3Tmax3CallBack; pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ); return; } // // Initialiation of MAC & BBP registers // static BOOL device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType) { u8 abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; u8 abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00}; u8 abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8}; BYTE byAntenna; unsigned int 
ii; CMD_CARD_INIT sInitCmd; int ntStatus = STATUS_SUCCESS; RSP_CARD_INIT sInitRsp; PSMgmtObject pMgmt = &(pDevice->sMgmtObj); BYTE byTmp; BYTE byCalibTXIQ = 0; BYTE byCalibTXDC = 0; BYTE byCalibRXIQ = 0; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "---->INIbInitAdapter. [%d][%d]\n", InitType, pDevice->byPacketType); spin_lock_irq(&pDevice->lock); if (InitType == DEVICE_INIT_COLD) { memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN); memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN); memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN); if ( !FIRMWAREbCheckVersion(pDevice) ) { if (FIRMWAREbDownload(pDevice) == TRUE) { if (FIRMWAREbBrach2Sram(pDevice) == FALSE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbBrach2Sram fail \n"); spin_unlock_irq(&pDevice->lock); return FALSE; } } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" FIRMWAREbDownload fail \n"); spin_unlock_irq(&pDevice->lock); return FALSE; } } if ( !BBbVT3184Init(pDevice) ) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" BBbVT3184Init fail \n"); spin_unlock_irq(&pDevice->lock); return FALSE; } } sInitCmd.byInitClass = (BYTE)InitType; sInitCmd.bExistSWNetAddr = (BYTE) pDevice->bExistSWNetAddr; for (ii = 0; ii < 6; ii++) sInitCmd.bySWNetAddr[ii] = pDevice->abyCurrentNetAddr[ii]; sInitCmd.byShortRetryLimit = pDevice->byShortRetryLimit; sInitCmd.byLongRetryLimit = pDevice->byLongRetryLimit; //issue Card_init command to device ntStatus = CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_CARDINIT, 0, 0, sizeof(CMD_CARD_INIT), (PBYTE) &(sInitCmd)); if ( ntStatus != STATUS_SUCCESS ) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Issue Card init fail \n"); spin_unlock_irq(&pDevice->lock); return FALSE; } if (InitType == DEVICE_INIT_COLD) { ntStatus = CONTROLnsRequestIn(pDevice,MESSAGE_TYPE_INIT_RSP,0,0,sizeof(RSP_CARD_INIT), (PBYTE) &(sInitRsp)); if (ntStatus != STATUS_SUCCESS) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Cardinit request in status fail!\n"); spin_unlock_irq(&pDevice->lock); return FALSE; } //Local ID for 
AES functions ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, MAC_REG_LOCALID, MESSAGE_REQUEST_MACREG, 1, &pDevice->byLocalID); if ( ntStatus != STATUS_SUCCESS ) { spin_unlock_irq(&pDevice->lock); return FALSE; } // Do MACbSoftwareReset in MACvInitialize // force CCK pDevice->bCCK = TRUE; pDevice->bProtectMode = FALSE; //Only used in 11g type, sync with ERP IE pDevice->bNonERPPresent = FALSE; pDevice->bBarkerPreambleMd = FALSE; if ( pDevice->bFixRate ) { pDevice->wCurrentRate = (WORD) pDevice->uConnectionRate; } else { if ( pDevice->byBBType == BB_TYPE_11B ) pDevice->wCurrentRate = RATE_11M; else pDevice->wCurrentRate = RATE_54M; } CHvInitChannelTable(pDevice); pDevice->byTopOFDMBasicRate = RATE_24M; pDevice->byTopCCKBasicRate = RATE_1M; pDevice->byRevId = 0; //Target to IF pin while programming to RF chip. pDevice->byCurPwr = 0xFF; pDevice->byCCKPwr = pDevice->abyEEPROM[EEP_OFS_PWR_CCK]; pDevice->byOFDMPwrG = pDevice->abyEEPROM[EEP_OFS_PWR_OFDMG]; // Load power Table for (ii=0;ii<14;ii++) { pDevice->abyCCKPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_CCK_PWR_TBL]; if (pDevice->abyCCKPwrTbl[ii] == 0) pDevice->abyCCKPwrTbl[ii] = pDevice->byCCKPwr; pDevice->abyOFDMPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDM_PWR_TBL]; if (pDevice->abyOFDMPwrTbl[ii] == 0) pDevice->abyOFDMPwrTbl[ii] = pDevice->byOFDMPwrG; } //original zonetype is USA,but customize zonetype is europe, // then need recover 12,13 ,14 channel with 11 channel if(((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) || (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe))&& (pDevice->byOriginalZonetype == ZoneType_USA)) { for (ii = 11; ii < 14; ii++) { pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10]; pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10]; } } //{{ RobertYu: 20041124 pDevice->byOFDMPwrA = 0x34; // same as RFbMA2829SelectChannel // Load OFDM A Power Table for (ii=0;ii<CB_MAX_CHANNEL_5G;ii++) { //RobertYu:20041224, bug using CB_MAX_CHANNEL 
pDevice->abyOFDMAPwrTbl[ii] = pDevice->abyEEPROM[ii + EEP_OFS_OFDMA_PWR_TBL]; if (pDevice->abyOFDMAPwrTbl[ii] == 0) pDevice->abyOFDMAPwrTbl[ii] = pDevice->byOFDMPwrA; } //}} RobertYu byAntenna = pDevice->abyEEPROM[EEP_OFS_ANTENNA]; if (byAntenna & EEP_ANTINV) pDevice->bTxRxAntInv = TRUE; else pDevice->bTxRxAntInv = FALSE; byAntenna &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN); if (byAntenna == 0) // if not set default is All byAntenna = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN); if (byAntenna == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) { pDevice->byAntennaCount = 2; pDevice->byTxAntennaMode = ANT_B; pDevice->dwTxAntennaSel = 1; pDevice->dwRxAntennaSel = 1; if (pDevice->bTxRxAntInv == TRUE) pDevice->byRxAntennaMode = ANT_A; else pDevice->byRxAntennaMode = ANT_B; if (pDevice->bDiversityRegCtlON) pDevice->bDiversityEnable = TRUE; else pDevice->bDiversityEnable = FALSE; } else { pDevice->bDiversityEnable = FALSE; pDevice->byAntennaCount = 1; pDevice->dwTxAntennaSel = 0; pDevice->dwRxAntennaSel = 0; if (byAntenna & EEP_ANTENNA_AUX) { pDevice->byTxAntennaMode = ANT_A; if (pDevice->bTxRxAntInv == TRUE) pDevice->byRxAntennaMode = ANT_B; else pDevice->byRxAntennaMode = ANT_A; } else { pDevice->byTxAntennaMode = ANT_B; if (pDevice->bTxRxAntInv == TRUE) pDevice->byRxAntennaMode = ANT_A; else pDevice->byRxAntennaMode = ANT_B; } } pDevice->ulDiversityNValue = 100*255; pDevice->ulDiversityMValue = 100*16; pDevice->byTMax = 1; pDevice->byTMax2 = 4; pDevice->ulSQ3TH = 0; pDevice->byTMax3 = 64; // ----------------------------------------------------------------- //Get Auto Fall Back Type pDevice->byAutoFBCtrl = AUTO_FB_0; // Set SCAN Time pDevice->uScanTime = WLAN_SCAN_MINITIME; // default Auto Mode //pDevice->NetworkType = Ndis802_11Automode; pDevice->eConfigPHYMode = PHY_TYPE_AUTO; pDevice->byBBType = BB_TYPE_11G; // initialize BBP registers pDevice->ulTxPower = 25; // Get Channel range pDevice->byMinChannel = 1; pDevice->byMaxChannel = CB_MAX_CHANNEL; // Get RFType pDevice->byRFType = 
sInitRsp.byRFType; if ((pDevice->byRFType & RF_EMU) != 0) { // force change RevID for VT3253 emu pDevice->byRevId = 0x80; } // Load EEPROM calibrated vt3266 parameters if (pDevice->byRFType == RF_VT3226D0) { if((pDevice->abyEEPROM[EEP_OFS_MAJOR_VER] == 0x1) && (pDevice->abyEEPROM[EEP_OFS_MINOR_VER] >= 0x4)) { byCalibTXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_IQ]; byCalibTXDC = pDevice->abyEEPROM[EEP_OFS_CALIB_TX_DC]; byCalibRXIQ = pDevice->abyEEPROM[EEP_OFS_CALIB_RX_IQ]; if( (byCalibTXIQ || byCalibTXDC || byCalibRXIQ) ) { ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFF, 0x03); // CR255, Set BB to support TX/RX IQ and DC compensation Mode ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFB, byCalibTXIQ); // CR251, TX I/Q Imbalance Calibration ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFC, byCalibTXDC); // CR252, TX DC-Offset Calibration ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFD, byCalibRXIQ); // CR253, RX I/Q Imbalance Calibration } else { // turn off BB Calibration compensation ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xFF, 0x0); // CR255 } } } pMgmt->eScanType = WMAC_SCAN_PASSIVE; pMgmt->uCurrChannel = pDevice->uChannel; pMgmt->uIBSSChannel = pDevice->uChannel; CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel); // get Permanent network address memcpy(pDevice->abyPermanentNetAddr,&(sInitRsp.byNetAddr[0]),6); memcpy(pDevice->abyCurrentNetAddr, pDevice->abyPermanentNetAddr, ETH_ALEN); // if exist SW network address, use SW network address. DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %pM\n", pDevice->abyCurrentNetAddr); } // Set BB and packet type at the same time. // Set Short Slot Time, xIFS, and RSPINF. 
if (pDevice->byBBType == BB_TYPE_11A) { CARDbAddBasicRate(pDevice, RATE_6M); pDevice->bShortSlotTime = TRUE; } else { CARDbAddBasicRate(pDevice, RATE_1M); pDevice->bShortSlotTime = FALSE; } BBvSetShortSlotTime(pDevice); CARDvSetBSSMode(pDevice); if (pDevice->bUpdateBBVGA) { pDevice->byBBVGACurrent = pDevice->abyBBVGA[0]; pDevice->byBBVGANew = pDevice->byBBVGACurrent; BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]); } pDevice->byRadioCtl = pDevice->abyEEPROM[EEP_OFS_RADIOCTL]; pDevice->bHWRadioOff = FALSE; if ( (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) != 0 ) { ntStatus = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, MAC_REG_GPIOCTL1, MESSAGE_REQUEST_MACREG, 1, &byTmp); if ( ntStatus != STATUS_SUCCESS ) { spin_unlock_irq(&pDevice->lock); return FALSE; } if ( (byTmp & GPIO3_DATA) == 0 ) { pDevice->bHWRadioOff = TRUE; MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD); } else { MACvRegBitsOff(pDevice,MAC_REG_GPIOCTL1,GPIO3_INTMD); pDevice->bHWRadioOff = FALSE; } } //EEP_RADIOCTL_ENABLE ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_TMLEN,0x38); ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW); MACvRegBitsOn(pDevice,MAC_REG_GPIOCTL0,0x01); if ((pDevice->bHWRadioOff == TRUE) || (pDevice->bRadioControlOff == TRUE)) { CARDbRadioPowerOff(pDevice); } else { CARDbRadioPowerOn(pDevice); } spin_unlock_irq(&pDevice->lock); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----INIbInitAdapter Exit\n"); return TRUE; } static BOOL device_release_WPADEV(PSDevice pDevice) { viawget_wpa_header *wpahdr; int ii=0; // wait_queue_head_t Set_wait; //send device close to wpa_supplicnat layer if (pDevice->bWPADEVUp==TRUE) { wpahdr = (viawget_wpa_header *)pDevice->skb->data; wpahdr->type = VIAWGET_DEVICECLOSE_MSG; wpahdr->resp_ie_len = 0; wpahdr->req_ie_len = 0; skb_put(pDevice->skb, sizeof(viawget_wpa_header)); pDevice->skb->dev = pDevice->wpadev; skb_reset_mac_header(pDevice->skb); pDevice->skb->pkt_type = PACKET_HOST; 
pDevice->skb->protocol = htons(ETH_P_802_2); memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb)); netif_rx(pDevice->skb); pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); //wait release WPADEV // init_waitqueue_head(&Set_wait); // wait_event_timeout(Set_wait, ((pDevice->wpadev==NULL)&&(pDevice->skb == NULL)),5*HZ); //1s wait while(pDevice->bWPADEVUp==TRUE) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout (HZ/20); //wait 50ms ii++; if(ii>20) break; } } return TRUE; } #ifdef CONFIG_PM /* Minimal support for suspend and resume */ static int vt6656_suspend(struct usb_interface *intf, pm_message_t message) { PSDevice device = usb_get_intfdata(intf); if (!device || !device->dev) return -ENODEV; if (device->flags & DEVICE_FLAGS_OPENED) device_close(device->dev); return 0; } static int vt6656_resume(struct usb_interface *intf) { PSDevice device = usb_get_intfdata(intf); if (!device || !device->dev) return -ENODEV; if (!(device->flags & DEVICE_FLAGS_OPENED)) device_open(device->dev); return 0; } #endif /* CONFIG_PM */ static const struct net_device_ops device_netdev_ops = { .ndo_open = device_open, .ndo_stop = device_close, .ndo_do_ioctl = device_ioctl, .ndo_get_stats = device_get_stats, .ndo_start_xmit = device_xmit, .ndo_set_rx_mode = device_set_multi, }; static int __devinit vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id) { u8 fake_mac[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; struct usb_device *udev = interface_to_usbdev(intf); int rc = 0; struct net_device *netdev = NULL; PSDevice pDevice = NULL; printk(KERN_NOTICE "%s Ver. 
%s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION); printk(KERN_NOTICE "Copyright (c) 2004 VIA Networking Technologies, Inc.\n"); udev = usb_get_dev(udev); netdev = alloc_etherdev(sizeof(DEVICE_INFO)); if (!netdev) { printk(KERN_ERR DEVICE_NAME ": allocate net device failed\n"); rc = -ENOMEM; goto err_nomem; } pDevice = netdev_priv(netdev); memset(pDevice, 0, sizeof(DEVICE_INFO)); pDevice->dev = netdev; pDevice->usb = udev; device_set_options(pDevice); spin_lock_init(&pDevice->lock); pDevice->tx_80211 = device_dma0_tx_80211; pDevice->sMgmtObj.pAdapter = (void *) pDevice; netdev->netdev_ops = &device_netdev_ops; netdev->wireless_handlers = (struct iw_handler_def *) &iwctl_handler_def; usb_set_intfdata(intf, pDevice); SET_NETDEV_DEV(netdev, &intf->dev); memcpy(pDevice->dev->dev_addr, fake_mac, ETH_ALEN); rc = register_netdev(netdev); if (rc) { printk(KERN_ERR DEVICE_NAME " Failed to register netdev\n"); goto err_netdev; } usb_device_reset(pDevice); { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.flags = RT_INSMOD_EVENT_FLAG; wrqu.data.length = IFNAMSIZ; wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, pDevice->dev->name); } return 0; err_netdev: free_netdev(netdev); err_nomem: usb_put_dev(udev); return rc; } static void device_free_tx_bufs(PSDevice pDevice) { PUSB_SEND_CONTEXT pTxContext; int ii; for (ii = 0; ii < pDevice->cbTD; ii++) { pTxContext = pDevice->apTD[ii]; //de-allocate URBs if (pTxContext->pUrb) { usb_kill_urb(pTxContext->pUrb); usb_free_urb(pTxContext->pUrb); } kfree(pTxContext); } return; } static void device_free_rx_bufs(PSDevice pDevice) { PRCB pRCB; int ii; for (ii = 0; ii < pDevice->cbRD; ii++) { pRCB = pDevice->apRCB[ii]; //de-allocate URBs if (pRCB->pUrb) { usb_kill_urb(pRCB->pUrb); usb_free_urb(pRCB->pUrb); } //de-allocate skb if (pRCB->skb) dev_kfree_skb(pRCB->skb); } kfree(pDevice->pRCBMem); return; } static void usb_device_reset(PSDevice pDevice) { int status; status = usb_reset_device(pDevice->usb); if (status) 
printk("usb_device_reset fail status=%d\n",status); return ; } static void device_free_int_bufs(PSDevice pDevice) { kfree(pDevice->intBuf.pDataBuf); return; } static BOOL device_alloc_bufs(PSDevice pDevice) { PUSB_SEND_CONTEXT pTxContext; PRCB pRCB; int ii; for (ii = 0; ii < pDevice->cbTD; ii++) { pTxContext = kmalloc(sizeof(USB_SEND_CONTEXT), GFP_KERNEL); if (pTxContext == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : allocate tx usb context failed\n", pDevice->dev->name); goto free_tx; } pDevice->apTD[ii] = pTxContext; pTxContext->pDevice = (void *) pDevice; //allocate URBs pTxContext->pUrb = usb_alloc_urb(0, GFP_ATOMIC); if (pTxContext->pUrb == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "alloc tx urb failed\n"); goto free_tx; } pTxContext->bBoolInUse = FALSE; } // allocate rcb mem pDevice->pRCBMem = kzalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL); if (pDevice->pRCBMem == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name); goto free_tx; } pDevice->FirstRecvFreeList = NULL; pDevice->LastRecvFreeList = NULL; pDevice->FirstRecvMngList = NULL; pDevice->LastRecvMngList = NULL; pDevice->NumRecvFreeList = 0; pRCB = (PRCB) pDevice->pRCBMem; for (ii = 0; ii < pDevice->cbRD; ii++) { pDevice->apRCB[ii] = pRCB; pRCB->pDevice = (void *) pDevice; //allocate URBs pRCB->pUrb = usb_alloc_urb(0, GFP_ATOMIC); if (pRCB->pUrb == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx urb\n"); goto free_rx_tx; } pRCB->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); if (pRCB->skb == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR" Failed to alloc rx skb\n"); goto free_rx_tx; } pRCB->skb->dev = pDevice->dev; pRCB->bBoolInUse = FALSE; EnqueueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList, pRCB); pDevice->NumRecvFreeList++; pRCB++; } pDevice->pControlURB = usb_alloc_urb(0, GFP_ATOMIC); if (pDevice->pControlURB == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc control urb\n"); goto free_rx_tx; } pDevice->pInterruptURB = 
usb_alloc_urb(0, GFP_ATOMIC); if (pDevice->pInterruptURB == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int urb\n"); usb_free_urb(pDevice->pControlURB); goto free_rx_tx; } pDevice->intBuf.pDataBuf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL); if (pDevice->intBuf.pDataBuf == NULL) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR"Failed to alloc int buf\n"); usb_free_urb(pDevice->pControlURB); usb_free_urb(pDevice->pInterruptURB); goto free_rx_tx; } return TRUE; free_rx_tx: device_free_rx_bufs(pDevice); free_tx: device_free_tx_bufs(pDevice); return FALSE; } static BOOL device_init_defrag_cb(PSDevice pDevice) { int i; PSDeFragControlBlock pDeF; /* Init the fragment ctl entries */ for (i = 0; i < CB_MAX_RX_FRAG; i++) { pDeF = &(pDevice->sRxDFCB[i]); if (!device_alloc_frag_buf(pDevice, pDeF)) { DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc frag bufs\n", pDevice->dev->name); goto free_frag; } } pDevice->cbDFCB = CB_MAX_RX_FRAG; pDevice->cbFreeDFCB = pDevice->cbDFCB; return TRUE; free_frag: device_free_frag_bufs(pDevice); return FALSE; } static void device_free_frag_bufs(PSDevice pDevice) { PSDeFragControlBlock pDeF; int i; for (i = 0; i < CB_MAX_RX_FRAG; i++) { pDeF = &(pDevice->sRxDFCB[i]); if (pDeF->skb) dev_kfree_skb(pDeF->skb); } } BOOL device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF) { pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz); if (pDeF->skb == NULL) return FALSE; ASSERT(pDeF->skb); pDeF->skb->dev = pDevice->dev; return TRUE; } /*-----------------------------------------------------------------*/ static int device_open(struct net_device *dev) { PSDevice pDevice=(PSDevice) netdev_priv(dev); extern SWPAResult wpa_Result; memset(wpa_Result.ifname,0,sizeof(wpa_Result.ifname)); wpa_Result.proto = 0; wpa_Result.key_mgmt = 0; wpa_Result.eap_type = 0; wpa_Result.authenticated = FALSE; pDevice->fWPA_Authened = FALSE; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " device_open...\n"); pDevice->rx_buf_sz = MAX_TOTAL_SIZE_WITH_ALL_HEADERS; if 
(device_alloc_bufs(pDevice) == FALSE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " device_alloc_bufs fail... \n"); return -ENOMEM; } if (device_init_defrag_cb(pDevice)== FALSE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " Initial defragement cb fail \n"); goto free_rx_tx; } MP_CLEAR_FLAG(pDevice, fMP_DISCONNECTED); MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS); MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES); MP_SET_FLAG(pDevice, fMP_POST_READS); MP_SET_FLAG(pDevice, fMP_POST_WRITES); //read config file Read_config_file(pDevice); if (device_init_registers(pDevice, DEVICE_INIT_COLD) == FALSE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " init register fail\n"); goto free_all; } device_set_multi(pDevice->dev); // Init for Key Management KeyvInitTable(pDevice,&pDevice->sKey); memcpy(pDevice->sMgmtObj.abyMACAddr, pDevice->abyCurrentNetAddr, ETH_ALEN); memcpy(pDevice->dev->dev_addr, pDevice->abyCurrentNetAddr, ETH_ALEN); pDevice->bStopTx0Pkt = FALSE; pDevice->bStopDataPkt = FALSE; pDevice->bRoaming = FALSE; pDevice->bIsRoaming = FALSE; pDevice->bEnableRoaming = FALSE; if (pDevice->bDiversityRegCtlON) { device_init_diversity_timer(pDevice); } vMgrObjectInit(pDevice); tasklet_init(&pDevice->RxMngWorkItem, (void *)RXvMngWorkItem, (unsigned long)pDevice); tasklet_init(&pDevice->ReadWorkItem, (void *)RXvWorkItem, (unsigned long)pDevice); tasklet_init(&pDevice->EventWorkItem, (void *)INTvWorkItem, (unsigned long)pDevice); add_timer(&(pDevice->sMgmtObj.sTimerSecondCallback)); pDevice->int_interval = 100; //Max 100 microframes. 
pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; pDevice->bIsRxWorkItemQueued = TRUE; pDevice->fKillEventPollingThread = FALSE; pDevice->bEventAvailable = FALSE; pDevice->bWPADEVUp = FALSE; #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT pDevice->bwextstep0 = FALSE; pDevice->bwextstep1 = FALSE; pDevice->bwextstep2 = FALSE; pDevice->bwextstep3 = FALSE; pDevice->bWPASuppWextEnabled = FALSE; #endif pDevice->byReAssocCount = 0; RXvWorkItem(pDevice); INTvWorkItem(pDevice); // Patch: if WEP key already set by iwconfig but device not yet open if ((pDevice->bEncryptionEnable == TRUE) && (pDevice->bTransmitKey == TRUE)) { spin_lock_irq(&pDevice->lock); KeybSetDefaultKey( pDevice, &(pDevice->sKey), pDevice->byKeyIndex | (1 << 31), pDevice->uKeyLength, NULL, pDevice->abyKey, KEY_CTL_WEP ); spin_unlock_irq(&pDevice->lock); pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled; } if (pDevice->sMgmtObj.eConfigMode == WMAC_CONFIG_AP) { bScheduleCommand((void *) pDevice, WLAN_CMD_RUN_AP, NULL); } else { //mike:mark@2008-11-10 bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL); /* bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL); */ } netif_stop_queue(pDevice->dev); pDevice->flags |= DEVICE_FLAGS_OPENED; { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.flags = RT_UPDEV_EVENT_FLAG; wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open success.. \n"); return 0; free_all: device_free_frag_bufs(pDevice); free_rx_tx: device_free_rx_bufs(pDevice); device_free_tx_bufs(pDevice); device_free_int_bufs(pDevice); usb_kill_urb(pDevice->pControlURB); usb_kill_urb(pDevice->pInterruptURB); usb_free_urb(pDevice->pControlURB); usb_free_urb(pDevice->pInterruptURB); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open fail.. 
\n"); return -ENOMEM; } static int device_close(struct net_device *dev) { PSDevice pDevice=(PSDevice) netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); int uu; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close1 \n"); if (pDevice == NULL) return -ENODEV; { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.flags = RT_DOWNDEV_EVENT_FLAG; wireless_send_event(pDevice->dev, IWEVCUSTOM, &wrqu, NULL); } if (pDevice->bLinkPass) { bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL); mdelay(30); } device_release_WPADEV(pDevice); memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1); pMgmt->bShareKeyAlgorithm = FALSE; pDevice->bEncryptionEnable = FALSE; pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled; spin_lock_irq(&pDevice->lock); for (uu = 0; uu < MAX_KEY_TABLE; uu++) MACvDisableKeyEntry(pDevice,uu); spin_unlock_irq(&pDevice->lock); if ((pDevice->flags & DEVICE_FLAGS_UNPLUG) == FALSE) { MACbShutdown(pDevice); } netif_stop_queue(pDevice->dev); MP_SET_FLAG(pDevice, fMP_DISCONNECTED); MP_CLEAR_FLAG(pDevice, fMP_POST_WRITES); MP_CLEAR_FLAG(pDevice, fMP_POST_READS); pDevice->fKillEventPollingThread = TRUE; del_timer(&pDevice->sTimerCommand); del_timer(&pMgmt->sTimerSecondCallback); del_timer(&pDevice->sTimerTxData); if (pDevice->bDiversityRegCtlON) { del_timer(&pDevice->TimerSQ3Tmax1); del_timer(&pDevice->TimerSQ3Tmax2); del_timer(&pDevice->TimerSQ3Tmax3); } tasklet_kill(&pDevice->RxMngWorkItem); tasklet_kill(&pDevice->ReadWorkItem); tasklet_kill(&pDevice->EventWorkItem); pDevice->bRoaming = FALSE; pDevice->bIsRoaming = FALSE; pDevice->bEnableRoaming = FALSE; pDevice->bCmdRunning = FALSE; pDevice->bLinkPass = FALSE; memset(pMgmt->abyCurrBSSID, 0, 6); pMgmt->eCurrState = WMAC_STATE_IDLE; pDevice->flags &= ~DEVICE_FLAGS_OPENED; device_free_tx_bufs(pDevice); device_free_rx_bufs(pDevice); device_free_int_bufs(pDevice); device_free_frag_bufs(pDevice); usb_kill_urb(pDevice->pControlURB); 
usb_kill_urb(pDevice->pInterruptURB); usb_free_urb(pDevice->pControlURB); usb_free_urb(pDevice->pInterruptURB); BSSvClearNodeDBTable(pDevice, 0); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); return 0; } static void __devexit vt6656_disconnect(struct usb_interface *intf) { PSDevice device = usb_get_intfdata(intf); if (!device) return; { union iwreq_data req; memset(&req, 0, sizeof(req)); req.data.flags = RT_RMMOD_EVENT_FLAG; wireless_send_event(device->dev, IWEVCUSTOM, &req, NULL); } device_release_WPADEV(device); if (device->firmware) release_firmware(device->firmware); usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); device->flags |= DEVICE_FLAGS_UNPLUG; if (device->dev) { unregister_netdev(device->dev); wpa_set_wpadev(device, 0); free_netdev(device->dev); } } static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) { PSDevice pDevice = netdev_priv(dev); spin_lock_irq(&pDevice->lock); if (unlikely(pDevice->bStopTx0Pkt)) dev_kfree_skb_irq(skb); else vDMA0_tx_80211(pDevice, skb); spin_unlock_irq(&pDevice->lock); return NETDEV_TX_OK; } static int device_xmit(struct sk_buff *skb, struct net_device *dev) { PSDevice pDevice = netdev_priv(dev); struct net_device_stats *stats = &pDevice->stats; spin_lock_irq(&pDevice->lock); netif_stop_queue(dev); if (!pDevice->bLinkPass) { dev_kfree_skb_irq(skb); goto out; } if (pDevice->bStopDataPkt) { dev_kfree_skb_irq(skb); stats->tx_dropped++; goto out; } if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb)) { if (netif_queue_stopped(dev)) netif_wake_queue(dev); } out: spin_unlock_irq(&pDevice->lock); return NETDEV_TX_OK; } static unsigned const ethernet_polynomial = 0x04c11db7U; static inline u32 ether_crc(int length, unsigned char *data) { int crc = -1; while(--length >= 0) { unsigned char current_octet = *data++; int bit; for (bit = 0; bit < 8; bit++, current_octet >>= 1) { crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ? 
ethernet_polynomial : 0); } } return crc; } //find out the start position of str2 from str1 static unsigned char *kstrstr(const unsigned char *str1, const unsigned char *str2) { int str1_len = strlen(str1); int str2_len = strlen(str2); while (str1_len >= str2_len) { str1_len--; if(memcmp(str1,str2,str2_len)==0) return (unsigned char *) str1; str1++; } return NULL; } static int Config_FileGetParameter(unsigned char *string, unsigned char *dest, unsigned char *source) { unsigned char buf1[100]; unsigned char buf2[100]; unsigned char *start_p = NULL, *end_p = NULL, *tmp_p = NULL; int ii; memset(buf1,0,100); strcat(buf1, string); strcat(buf1, "="); source+=strlen(buf1); //find target string start point start_p = kstrstr(source,buf1); if (start_p == NULL) return FALSE; //check if current config line is marked by "#" ?? for (ii = 1; ; ii++) { if (memcmp(start_p - ii, "\n", 1) == 0) break; if (memcmp(start_p - ii, "#", 1) == 0) return FALSE; } //find target string end point end_p = kstrstr(start_p,"\n"); if (end_p == NULL) { //can't find "\n",but don't care end_p=start_p+strlen(start_p); //no include "\n" } memset(buf2,0,100); memcpy(buf2,start_p,end_p-start_p); //get the tartget line buf2[end_p-start_p]='\0'; //find value start_p = kstrstr(buf2,"="); if (start_p == NULL) return FALSE; memset(buf1,0,100); strcpy(buf1,start_p+1); //except space tmp_p = buf1; while(*tmp_p != 0x00) { if(*tmp_p==' ') tmp_p++; else break; } memcpy(dest,tmp_p,strlen(tmp_p)); return TRUE; } //if read fail,return NULL,or return data pointer; static unsigned char *Config_FileOperation(PSDevice pDevice) { unsigned char *config_path = CONFIG_PATH; unsigned char *buffer = NULL; struct file *filp=NULL; mm_segment_t old_fs = get_fs(); //int oldfsuid=0,oldfsgid=0; int result = 0; set_fs (KERNEL_DS); /* Can't do this anymore, so we rely on correct filesystem permissions: //Make sure a caller can read or write power as root oldfsuid=current->fsuid; oldfsgid=current->fsgid; current->fsuid = 0; 
current->fsgid = 0; */ //open file filp = filp_open(config_path, O_RDWR, 0); if (IS_ERR(filp)) { printk("Config_FileOperation file Not exist\n"); result=-1; goto error2; } if(!(filp->f_op) || !(filp->f_op->read) ||!(filp->f_op->write)) { printk("file %s cann't readable or writable?\n",config_path); result = -1; goto error1; } buffer = kmalloc(1024, GFP_KERNEL); if(buffer==NULL) { printk("allocate mem for file fail?\n"); result = -1; goto error1; } if(filp->f_op->read(filp, buffer, 1024, &filp->f_pos)<0) { printk("read file error?\n"); result = -1; } error1: if(filp_close(filp,NULL)) printk("Config_FileOperation:close file fail\n"); error2: set_fs (old_fs); /* current->fsuid=oldfsuid; current->fsgid=oldfsgid; */ if(result!=0) { kfree(buffer); buffer=NULL; } return buffer; } //return --->-1:fail; >=0:successful static int Read_config_file(PSDevice pDevice) { int result = 0; unsigned char tmpbuffer[100]; unsigned char *buffer = NULL; //init config setting pDevice->config_file.ZoneType = -1; pDevice->config_file.eAuthenMode = -1; pDevice->config_file.eEncryptionStatus = -1; buffer = Config_FileOperation(pDevice); if (buffer == NULL) { result =-1; return result; } //get zonetype { memset(tmpbuffer,0,sizeof(tmpbuffer)); if(Config_FileGetParameter("ZONETYPE",tmpbuffer,buffer) ==TRUE) { if(memcmp(tmpbuffer,"USA",3)==0) { pDevice->config_file.ZoneType=ZoneType_USA; } else if(memcmp(tmpbuffer,"JAPAN",5)==0) { pDevice->config_file.ZoneType=ZoneType_Japan; } else if(memcmp(tmpbuffer,"EUROPE",6)==0) { pDevice->config_file.ZoneType=ZoneType_Europe; } else { printk("Unknown Zonetype[%s]?\n",tmpbuffer); } } } //get other parameter { memset(tmpbuffer,0,sizeof(tmpbuffer)); if(Config_FileGetParameter("AUTHENMODE",tmpbuffer,buffer)==TRUE) { pDevice->config_file.eAuthenMode = (int) simple_strtol(tmpbuffer, NULL, 10); } memset(tmpbuffer,0,sizeof(tmpbuffer)); if(Config_FileGetParameter("ENCRYPTIONMODE",tmpbuffer,buffer)==TRUE) { pDevice->config_file.eEncryptionStatus= (int) 
simple_strtol(tmpbuffer, NULL, 10); } } kfree(buffer); return result; } static void device_set_multi(struct net_device *dev) { PSDevice pDevice = (PSDevice) netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); u32 mc_filter[2]; int ii; struct netdev_hw_addr *ha; BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; BYTE byTmpMode = 0; int rc; spin_lock_irq(&pDevice->lock); rc = CONTROLnsRequestIn(pDevice, MESSAGE_TYPE_READ, MAC_REG_RCR, MESSAGE_REQUEST_MACREG, 1, &byTmpMode ); if (rc == 0) pDevice->byRxMode = byTmpMode; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode in= %x\n", pDevice->byRxMode); if (dev->flags & IFF_PROMISC) { // Set promiscuous. DBG_PRT(MSG_LEVEL_ERR,KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); // Unconditionally log net taps. pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST); } else if ((netdev_mc_count(dev) > pDevice->multicast_limit) || (dev->flags & IFF_ALLMULTI)) { CONTROLnsRequestOut(pDevice, MESSAGE_TYPE_WRITE, MAC_REG_MAR0, MESSAGE_REQUEST_MACREG, 8, pbyData ); pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST); } else { memset(mc_filter, 0, sizeof(mc_filter)); netdev_for_each_mc_addr(ha, dev) { int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31)); } for (ii = 0; ii < 4; ii++) { MACvWriteMultiAddr(pDevice, ii, *((PBYTE)&mc_filter[0] + ii)); MACvWriteMultiAddr(pDevice, ii+ 4, *((PBYTE)&mc_filter[1] + ii)); } pDevice->byRxMode &= ~(RCR_UNICAST); pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST); } if (pMgmt->eConfigMode == WMAC_CONFIG_AP) { // If AP mode, don't enable RCR_UNICAST. Since hw only compare addr1 with local mac. 
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST); pDevice->byRxMode &= ~(RCR_UNICAST); } ControlvWriteByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_RCR, pDevice->byRxMode); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode out= %x\n", pDevice->byRxMode); spin_unlock_irq(&pDevice->lock); } static struct net_device_stats *device_get_stats(struct net_device *dev) { PSDevice pDevice=(PSDevice) netdev_priv(dev); return &pDevice->stats; } static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { PSDevice pDevice = (PSDevice)netdev_priv(dev); PSMgmtObject pMgmt = &(pDevice->sMgmtObj); PSCmdRequest pReq; //BOOL bCommit = FALSE; struct iwreq *wrq = (struct iwreq *) rq; int rc =0; if (pMgmt == NULL) { rc = -EFAULT; return rc; } switch(cmd) { case SIOCGIWNAME: rc = iwctl_giwname(dev, NULL, (char *)&(wrq->u.name), NULL); break; case SIOCSIWNWID: case SIOCGIWNWID: //0x8b03 support rc = -EOPNOTSUPP; break; // Set frequency/channel case SIOCSIWFREQ: rc = iwctl_siwfreq(dev, NULL, &(wrq->u.freq), NULL); break; // Get frequency/channel case SIOCGIWFREQ: rc = iwctl_giwfreq(dev, NULL, &(wrq->u.freq), NULL); break; // Set desired network name (ESSID) case SIOCSIWESSID: { char essid[IW_ESSID_MAX_SIZE+1]; if (wrq->u.essid.length > IW_ESSID_MAX_SIZE) { rc = -E2BIG; break; } if (copy_from_user(essid, wrq->u.essid.pointer, wrq->u.essid.length)) { rc = -EFAULT; break; } rc = iwctl_siwessid(dev, NULL, &(wrq->u.essid), essid); } break; // Get current network name (ESSID) case SIOCGIWESSID: { char essid[IW_ESSID_MAX_SIZE+1]; if (wrq->u.essid.pointer) { iwctl_giwessid(dev, NULL, &(wrq->u.essid), essid); if (copy_to_user(wrq->u.essid.pointer, essid, wrq->u.essid.length) ) rc = -EFAULT; } } break; case SIOCSIWAP: rc = iwctl_siwap(dev, NULL, &(wrq->u.ap_addr), NULL); break; // Get current Access Point (BSSID) case SIOCGIWAP: rc = iwctl_giwap(dev, NULL, &(wrq->u.ap_addr), NULL); break; // Set desired station name case SIOCSIWNICKN: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 
SIOCSIWNICKN \n"); rc = -EOPNOTSUPP; break; // Get current station name case SIOCGIWNICKN: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWNICKN \n"); rc = -EOPNOTSUPP; break; // Set the desired bit-rate case SIOCSIWRATE: rc = iwctl_siwrate(dev, NULL, &(wrq->u.bitrate), NULL); break; // Get the current bit-rate case SIOCGIWRATE: iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL); break; // Set the desired RTS threshold case SIOCSIWRTS: rc = iwctl_siwrts(dev, &(wrq->u.rts)); break; // Get the current RTS threshold case SIOCGIWRTS: rc = iwctl_giwrts(dev, NULL, &(wrq->u.rts), NULL); break; // Set the desired fragmentation threshold case SIOCSIWFRAG: rc = iwctl_siwfrag(dev, NULL, &(wrq->u.frag), NULL); break; // Get the current fragmentation threshold case SIOCGIWFRAG: rc = iwctl_giwfrag(dev, NULL, &(wrq->u.frag), NULL); break; // Set mode of operation case SIOCSIWMODE: rc = iwctl_siwmode(dev, NULL, &(wrq->u.mode), NULL); break; // Get mode of operation case SIOCGIWMODE: iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL); break; // Set WEP keys and mode case SIOCSIWENCODE: { char abyKey[WLAN_WEP232_KEYLEN]; if (wrq->u.encoding.pointer) { if (wrq->u.encoding.length > WLAN_WEP232_KEYLEN) { rc = -E2BIG; break; } memset(abyKey, 0, WLAN_WEP232_KEYLEN); if (copy_from_user(abyKey, wrq->u.encoding.pointer, wrq->u.encoding.length)) { rc = -EFAULT; break; } } else if (wrq->u.encoding.length != 0) { rc = -EINVAL; break; } rc = iwctl_siwencode(dev, NULL, &(wrq->u.encoding), abyKey); } break; // Get the WEP keys and mode case SIOCGIWENCODE: if (!capable(CAP_NET_ADMIN)) { rc = -EPERM; break; } { char abyKey[WLAN_WEP232_KEYLEN]; rc = iwctl_giwencode(dev, NULL, &(wrq->u.encoding), abyKey); if (rc != 0) break; if (wrq->u.encoding.pointer) { if (copy_to_user(wrq->u.encoding.pointer, abyKey, wrq->u.encoding.length)) rc = -EFAULT; } } break; // Get the current Tx-Power case SIOCGIWTXPOW: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWTXPOW \n"); rc = -EOPNOTSUPP; break; case SIOCSIWTXPOW: 
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWTXPOW \n"); rc = -EOPNOTSUPP; break; case SIOCSIWRETRY: rc = iwctl_siwretry(dev, NULL, &(wrq->u.retry), NULL); break; case SIOCGIWRETRY: rc = iwctl_giwretry(dev, NULL, &(wrq->u.retry), NULL); break; // Get range of parameters case SIOCGIWRANGE: { struct iw_range range; iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range); if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range))) rc = -EFAULT; } break; case SIOCGIWPOWER: rc = iwctl_giwpower(dev, NULL, &(wrq->u.power), NULL); break; case SIOCSIWPOWER: rc = iwctl_siwpower(dev, NULL, &(wrq->u.power), NULL); break; case SIOCGIWSENS: rc = iwctl_giwsens(dev, NULL, &(wrq->u.sens), NULL); break; case SIOCSIWSENS: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSENS \n"); rc = -EOPNOTSUPP; break; case SIOCGIWAPLIST: { char buffer[IW_MAX_AP * (sizeof(struct sockaddr) + sizeof(struct iw_quality))]; if (wrq->u.data.pointer) { rc = iwctl_giwaplist(dev, NULL, &(wrq->u.data), buffer); if (rc == 0) { if (copy_to_user(wrq->u.data.pointer, buffer, (wrq->u.data.length * (sizeof(struct sockaddr) + sizeof(struct iw_quality))) )) rc = -EFAULT; } } } break; #ifdef WIRELESS_SPY // Set the spy list case SIOCSIWSPY: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSPY \n"); rc = -EOPNOTSUPP; break; // Get the spy list case SIOCGIWSPY: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSPY \n"); rc = -EOPNOTSUPP; break; #endif // WIRELESS_SPY case SIOCGIWPRIV: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWPRIV \n"); rc = -EOPNOTSUPP; /* if(wrq->u.data.pointer) { wrq->u.data.length = sizeof(iwctl_private_args) / sizeof( iwctl_private_args[0]); if(copy_to_user(wrq->u.data.pointer, (u_char *) iwctl_private_args, sizeof(iwctl_private_args))) rc = -EFAULT; } */ break; #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT case SIOCSIWAUTH: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWAUTH\n"); rc = iwctl_siwauth(dev, NULL, &(wrq->u.param), NULL); break; case SIOCGIWAUTH: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " 
SIOCGIWAUTH \n"); rc = iwctl_giwauth(dev, NULL, &(wrq->u.param), NULL); break; case SIOCSIWGENIE: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWGENIE \n"); rc = iwctl_siwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer); break; case SIOCGIWGENIE: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWGENIE \n"); rc = iwctl_giwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer); break; case SIOCSIWENCODEEXT: { char extra[sizeof(struct iw_encode_ext)+MAX_KEY_LEN+1]; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWENCODEEXT \n"); if(wrq->u.encoding.pointer){ memset(extra, 0, sizeof(struct iw_encode_ext)+MAX_KEY_LEN+1); if(wrq->u.encoding.length > (sizeof(struct iw_encode_ext)+ MAX_KEY_LEN)){ rc = -E2BIG; break; } if(copy_from_user(extra, wrq->u.encoding.pointer,wrq->u.encoding.length)){ rc = -EFAULT; break; } }else if(wrq->u.encoding.length != 0){ rc = -EINVAL; break; } rc = iwctl_siwencodeext(dev, NULL, &(wrq->u.encoding), extra); } break; case SIOCGIWENCODEEXT: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODEEXT \n"); rc = iwctl_giwencodeext(dev, NULL, &(wrq->u.encoding), NULL); break; case SIOCSIWMLME: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMLME \n"); rc = iwctl_siwmlme(dev, NULL, &(wrq->u.data), wrq->u.data.pointer); break; #endif // #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT case IOCTL_CMD_TEST: if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EFAULT; break; } else { rc = 0; } pReq = (PSCmdRequest)rq; //20080130-01,<Remark> by Mike Liu // if(pDevice->bLinkPass==TRUE) pReq->wResult = MAGIC_CODE; //Linking status:0x3142 //20080130-02,<Remark> by Mike Liu // else // pReq->wResult = MAGIC_CODE+1; //disconnect status:0x3143 break; case IOCTL_CMD_SET: if (!(pDevice->flags & DEVICE_FLAGS_OPENED) && (((PSCmdRequest)rq)->wCmdCode !=WLAN_CMD_SET_WPA)) { rc = -EFAULT; break; } else { rc = 0; } if (test_and_set_bit( 0, (void*)&(pMgmt->uCmdBusy))) { return -EBUSY; } rc = private_ioctl(pDevice, rq); clear_bit( 0, (void*)&(pMgmt->uCmdBusy)); break; case IOCTL_CMD_HOSTAPD: if 
(!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EFAULT; break; } else { rc = 0; } rc = vt6656_hostap_ioctl(pDevice, &wrq->u.data); break; case IOCTL_CMD_WPA: if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) { rc = -EFAULT; break; } else { rc = 0; } rc = wpa_ioctl(pDevice, &wrq->u.data); break; case SIOCETHTOOL: return ethtool_ioctl(dev, (void *) rq->ifr_data); // All other calls are currently unsupported default: rc = -EOPNOTSUPP; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Ioctl command not support..%x\n", cmd); } if (pDevice->bCommit) { if (pMgmt->eConfigMode == WMAC_CONFIG_AP) { netif_stop_queue(pDevice->dev); spin_lock_irq(&pDevice->lock); bScheduleCommand((void *) pDevice, WLAN_CMD_RUN_AP, NULL); spin_unlock_irq(&pDevice->lock); } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Commit the settings\n"); spin_lock_irq(&pDevice->lock); //2007-1121-01<Modify>by EinsnLiu if (pDevice->bLinkPass && memcmp(pMgmt->abyCurrSSID,pMgmt->abyDesireSSID,WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN)) { bScheduleCommand((void *) pDevice, WLAN_CMD_DISASSOCIATE, NULL); } else { pDevice->bLinkPass = FALSE; pMgmt->eCurrState = WMAC_STATE_IDLE; memset(pMgmt->abyCurrBSSID, 0, 6); } ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_SLOW); //End Modify netif_stop_queue(pDevice->dev); #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT pMgmt->eScanType = WMAC_SCAN_ACTIVE; if (!pDevice->bWPASuppWextEnabled) #endif bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID); bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL); spin_unlock_irq(&pDevice->lock); } pDevice->bCommit = FALSE; } return rc; } static int ethtool_ioctl(struct net_device *dev, void *useraddr) { u32 ethcmd; if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) return -EFAULT; switch (ethcmd) { case ETHTOOL_GDRVINFO: { struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO}; strncpy(info.driver, DEVICE_NAME, sizeof(info.driver)-1); strncpy(info.version, DEVICE_VERSION, sizeof(info.version)-1); if 
(copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; } } return -EOPNOTSUPP; } /*------------------------------------------------------------------*/ MODULE_DEVICE_TABLE(usb, vt6656_table); static struct usb_driver vt6656_driver = { .name = DEVICE_NAME, .probe = vt6656_probe, .disconnect = vt6656_disconnect, .id_table = vt6656_table, #ifdef CONFIG_PM .suspend = vt6656_suspend, .resume = vt6656_resume, #endif /* CONFIG_PM */ }; module_usb_driver(vt6656_driver);
gpl-2.0
Meninblack007/android_kernel_yu_msm8916
drivers/uio/uio_dmem_genirq.c
2142
9367
/* * drivers/uio/uio_dmem_genirq.c * * Userspace I/O platform driver with generic IRQ handling code. * * Copyright (C) 2012 Damian Hobson-Garcia * * Based on uio_pdrv_genirq.c by Magnus Damm * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/platform_device.h> #include <linux/uio_driver.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_data/uio_dmem_genirq.h> #include <linux/stringify.h> #include <linux/pm_runtime.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_address.h> #define DRIVER_NAME "uio_dmem_genirq" #define DMEM_MAP_ERROR (~0) struct uio_dmem_genirq_platdata { struct uio_info *uioinfo; spinlock_t lock; unsigned long flags; struct platform_device *pdev; unsigned int dmem_region_start; unsigned int num_dmem_regions; void *dmem_region_vaddr[MAX_UIO_MAPS]; struct mutex alloc_lock; unsigned int refcnt; }; static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode) { struct uio_dmem_genirq_platdata *priv = info->priv; struct uio_mem *uiomem; int ret = 0; int dmem_region = priv->dmem_region_start; uiomem = &priv->uioinfo->mem[priv->dmem_region_start]; mutex_lock(&priv->alloc_lock); while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) { void *addr; if (!uiomem->size) break; addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size, (dma_addr_t *)&uiomem->addr, GFP_KERNEL); if (!addr) { uiomem->addr = DMEM_MAP_ERROR; } priv->dmem_region_vaddr[dmem_region++] = addr; ++uiomem; } priv->refcnt++; mutex_unlock(&priv->alloc_lock); /* Wait until the Runtime PM code has woken up the device */ pm_runtime_get_sync(&priv->pdev->dev); return ret; } static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode) { 
struct uio_dmem_genirq_platdata *priv = info->priv; struct uio_mem *uiomem; int dmem_region = priv->dmem_region_start; /* Tell the Runtime PM code that the device has become idle */ pm_runtime_put_sync(&priv->pdev->dev); uiomem = &priv->uioinfo->mem[priv->dmem_region_start]; mutex_lock(&priv->alloc_lock); priv->refcnt--; while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) { if (!uiomem->size) break; if (priv->dmem_region_vaddr[dmem_region]) { dma_free_coherent(&priv->pdev->dev, uiomem->size, priv->dmem_region_vaddr[dmem_region], uiomem->addr); } uiomem->addr = DMEM_MAP_ERROR; ++dmem_region; ++uiomem; } mutex_unlock(&priv->alloc_lock); return 0; } static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info) { struct uio_dmem_genirq_platdata *priv = dev_info->priv; /* Just disable the interrupt in the interrupt controller, and * remember the state so we can allow user space to enable it later. */ if (!test_and_set_bit(0, &priv->flags)) disable_irq_nosync(irq); return IRQ_HANDLED; } static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) { struct uio_dmem_genirq_platdata *priv = dev_info->priv; unsigned long flags; /* Allow user space to enable and disable the interrupt * in the interrupt controller, but keep track of the * state to prevent per-irq depth damage. * * Serialize this operation to support multiple tasks. 
*/ spin_lock_irqsave(&priv->lock, flags); if (irq_on) { if (test_and_clear_bit(0, &priv->flags)) enable_irq(dev_info->irq); } else { if (!test_and_set_bit(0, &priv->flags)) disable_irq(dev_info->irq); } spin_unlock_irqrestore(&priv->lock, flags); return 0; } static int uio_dmem_genirq_probe(struct platform_device *pdev) { struct uio_dmem_genirq_pdata *pdata = pdev->dev.platform_data; struct uio_info *uioinfo = &pdata->uioinfo; struct uio_dmem_genirq_platdata *priv; struct uio_mem *uiomem; int ret = -EINVAL; int i; if (pdev->dev.of_node) { int irq; /* alloc uioinfo for one device */ uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL); if (!uioinfo) { ret = -ENOMEM; dev_err(&pdev->dev, "unable to kmalloc\n"); goto bad2; } uioinfo->name = pdev->dev.of_node->name; uioinfo->version = "devicetree"; /* Multiple IRQs are not supported */ irq = platform_get_irq(pdev, 0); if (irq == -ENXIO) uioinfo->irq = UIO_IRQ_NONE; else uioinfo->irq = irq; } if (!uioinfo || !uioinfo->name || !uioinfo->version) { dev_err(&pdev->dev, "missing platform_data\n"); goto bad0; } if (uioinfo->handler || uioinfo->irqcontrol || uioinfo->irq_flags & IRQF_SHARED) { dev_err(&pdev->dev, "interrupt configuration error\n"); goto bad0; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { ret = -ENOMEM; dev_err(&pdev->dev, "unable to kmalloc\n"); goto bad0; } dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); priv->uioinfo = uioinfo; spin_lock_init(&priv->lock); priv->flags = 0; /* interrupt is enabled to begin with */ priv->pdev = pdev; mutex_init(&priv->alloc_lock); if (!uioinfo->irq) { ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(&pdev->dev, "failed to get IRQ\n"); goto bad0; } uioinfo->irq = ret; } uiomem = &uioinfo->mem[0]; for (i = 0; i < pdev->num_resources; ++i) { struct resource *r = &pdev->resource[i]; if (r->flags != IORESOURCE_MEM) continue; if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) { dev_warn(&pdev->dev, "device has more than " __stringify(MAX_UIO_MAPS) " I/O memory 
resources.\n"); break; } uiomem->memtype = UIO_MEM_PHYS; uiomem->addr = r->start; uiomem->size = resource_size(r); ++uiomem; } priv->dmem_region_start = i; priv->num_dmem_regions = pdata->num_dynamic_regions; for (i = 0; i < pdata->num_dynamic_regions; ++i) { if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) { dev_warn(&pdev->dev, "device has more than " __stringify(MAX_UIO_MAPS) " dynamic and fixed memory regions.\n"); break; } uiomem->memtype = UIO_MEM_PHYS; uiomem->addr = DMEM_MAP_ERROR; uiomem->size = pdata->dynamic_region_sizes[i]; ++uiomem; } while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) { uiomem->size = 0; ++uiomem; } /* This driver requires no hardware specific kernel code to handle * interrupts. Instead, the interrupt handler simply disables the * interrupt in the interrupt controller. User space is responsible * for performing hardware specific acknowledge and re-enabling of * the interrupt in the interrupt controller. * * Interrupt sharing is not supported. */ uioinfo->handler = uio_dmem_genirq_handler; uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol; uioinfo->open = uio_dmem_genirq_open; uioinfo->release = uio_dmem_genirq_release; uioinfo->priv = priv; /* Enable Runtime PM for this device: * The device starts in suspended state to allow the hardware to be * turned off by default. The Runtime PM bus code should power on the * hardware and enable clocks at open(). 
*/ pm_runtime_enable(&pdev->dev); ret = uio_register_device(&pdev->dev, priv->uioinfo); if (ret) { dev_err(&pdev->dev, "unable to register uio device\n"); goto bad1; } platform_set_drvdata(pdev, priv); return 0; bad1: kfree(priv); pm_runtime_disable(&pdev->dev); bad0: /* kfree uioinfo for OF */ if (pdev->dev.of_node) kfree(uioinfo); bad2: return ret; } static int uio_dmem_genirq_remove(struct platform_device *pdev) { struct uio_dmem_genirq_platdata *priv = platform_get_drvdata(pdev); uio_unregister_device(priv->uioinfo); pm_runtime_disable(&pdev->dev); priv->uioinfo->handler = NULL; priv->uioinfo->irqcontrol = NULL; /* kfree uioinfo for OF */ if (pdev->dev.of_node) kfree(priv->uioinfo); kfree(priv); return 0; } static int uio_dmem_genirq_runtime_nop(struct device *dev) { /* Runtime PM callback shared between ->runtime_suspend() * and ->runtime_resume(). Simply returns success. * * In this driver pm_runtime_get_sync() and pm_runtime_put_sync() * are used at open() and release() time. This allows the * Runtime PM code to turn off power to the device while the * device is unused, ie before open() and after release(). * * This Runtime PM callback does not need to save or restore * any registers since user space is responsbile for hardware * register reinitialization after open(). 
*/ return 0; } static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = { .runtime_suspend = uio_dmem_genirq_runtime_nop, .runtime_resume = uio_dmem_genirq_runtime_nop, }; #ifdef CONFIG_OF static const struct of_device_id uio_of_genirq_match[] = { { /* empty for now */ }, }; MODULE_DEVICE_TABLE(of, uio_of_genirq_match); #else # define uio_of_genirq_match NULL #endif static struct platform_driver uio_dmem_genirq = { .probe = uio_dmem_genirq_probe, .remove = uio_dmem_genirq_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .pm = &uio_dmem_genirq_dev_pm_ops, .of_match_table = uio_of_genirq_match, }, }; module_platform_driver(uio_dmem_genirq); MODULE_AUTHOR("Damian Hobson-Garcia"); MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory."); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
zarboz/Beastmode.Evita.2.0
drivers/gpio/wm831x-gpio.c
2142
7657
/*
 * wm831x-gpio.c  --  gpiolib support for Wolfson WM831x PMICs
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>

#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/gpio.h>
#include <linux/mfd/wm831x/irq.h>

/* Per-device state: parent MFD handle plus the registered gpio_chip. */
struct wm831x_gpio {
	struct wm831x *wm831x;
	struct gpio_chip gpio_chip;
};

/* Recover our wm831x_gpio from the embedded gpio_chip handed in by gpiolib. */
static inline struct wm831x_gpio *to_wm831x_gpio(struct gpio_chip *chip)
{
	return container_of(chip, struct wm831x_gpio, gpio_chip);
}

/*
 * Put the GPIO into input mode.  Also clears the function field so the
 * pin is in plain GPIO mode.  On parts with a GPIO enable (has_gpio_ena)
 * the tristate bit is inverted in hardware, so it is set here as well —
 * see the matching inversion in wm831x_gpio_dbg_show().
 */
static int wm831x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int val = WM831X_GPN_DIR;

	if (wm831x->has_gpio_ena)
		val |= WM831X_GPN_TRI;

	return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
			       WM831X_GPN_DIR | WM831X_GPN_TRI |
			       WM831X_GPN_FN_MASK, val);
}

/*
 * Read the current level of one GPIO.  All pin levels live in a single
 * level register, one bit per GPIO.  Returns 0/1, or a negative errno
 * if the register read fails.
 */
static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int ret;

	ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
	if (ret < 0)
		return ret;

	if (ret & 1 << offset)
		return 1;
	else
		return 0;
}

/*
 * Drive one GPIO high or low via a read-modify-write of the shared
 * level register.  Write errors are not reported (gpiolib's set
 * callback returns void).
 */
static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset,
			    int value)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;

	wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
			value << offset);
}

/*
 * Put the GPIO into output mode, then set the requested initial level.
 * The level can only be programmed once the pin is already an output,
 * so a brief glitch to the register's previous level is possible here.
 */
static int wm831x_gpio_direction_out(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int val = 0;
	int ret;

	if (wm831x->has_gpio_ena)
		val |= WM831X_GPN_TRI;

	ret = wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
			      WM831X_GPN_DIR | WM831X_GPN_TRI |
			      WM831X_GPN_FN_MASK, val);
	if (ret < 0)
		return ret;

	/* Can only set GPIO state once it's in output mode */
	wm831x_gpio_set(chip, offset, value);

	return 0;
}

/*
 * Map a GPIO offset to its virtual IRQ number.  The WM831x GPIO IRQs
 * are laid out contiguously after the chip's irq_base; fails with
 * -EINVAL if the MFD core registered no IRQs.
 */
static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;

	if (!wm831x->irq_base)
		return -EINVAL;

	return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
}

/*
 * Configure hardware debounce for a GPIO, selected via the pin function
 * field.  Only two fixed debounce windows exist (roughly 32-64us and
 * 4-8ms); any other request is -EINVAL.  Refuses with -EBUSY if the pin
 * is currently muxed to a non-GPIO alternate function, since changing
 * the function field would steal the pin.
 */
static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
				    unsigned debounce)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int reg = WM831X_GPIO1_CONTROL + offset;
	int ret, fn;

	ret = wm831x_reg_read(wm831x, reg);
	if (ret < 0)
		return ret;

	switch (ret & WM831X_GPN_FN_MASK) {
	case 0:
	case 1:
		break;
	default:
		/* Not in GPIO mode */
		return -EBUSY;
	}

	if (debounce >= 32 && debounce <= 64)
		fn = 0;
	else if (debounce >= 4000 && debounce <= 8000)
		fn = 1;
	else
		return -EINVAL;

	return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn);
}

#ifdef CONFIG_DEBUG_FS
/*
 * debugfs dump: one line per GPIO describing direction, level, pull,
 * power domain, polarity, output mode and tristate, plus the raw
 * control register value.
 */
static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int i, tristated;

	for (i = 0; i < chip->ngpio; i++) {
		int gpio = i + chip->base;
		int reg;
		const char *label, *pull, *powerdomain;

		/* We report the GPIO even if it's not requested since
		 * we're also reporting things like alternate
		 * functions which apply even when the GPIO is not in
		 * use as a GPIO.
		 */
		label = gpiochip_is_requested(chip, i);
		if (!label)
			label = "Unrequested";

		seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label);

		reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i);
		if (reg < 0) {
			dev_err(wm831x->dev,
				"GPIO control %d read failed: %d\n",
				gpio, reg);
			seq_printf(s, "\n");
			continue;
		}

		switch (reg & WM831X_GPN_PULL_MASK) {
		case WM831X_GPIO_PULL_NONE:
			pull = "nopull";
			break;
		case WM831X_GPIO_PULL_DOWN:
			pull = "pulldown";
			break;
		case WM831X_GPIO_PULL_UP:
			pull = "pullup";
			break;
		default:
			pull = "INVALID PULL";
			break;
		}

		/*
		 * The selectable power domain depends on which bank
		 * (1-based pin number) the GPIO sits in; pins 13-16 are
		 * fixed to TPVDD.  NOTE(review): ranges appear to match
		 * the WM831x datasheet pin groups — confirm against the
		 * datasheet for the specific part.
		 */
		switch (i + 1) {
		case 1 ... 3:
		case 7 ... 9:
			if (reg & WM831X_GPN_PWR_DOM)
				powerdomain = "VPMIC";
			else
				powerdomain = "DBVDD";
			break;

		case 4 ... 6:
		case 10 ... 12:
			if (reg & WM831X_GPN_PWR_DOM)
				powerdomain = "SYSVDD";
			else
				powerdomain = "DBVDD";
			break;

		case 13 ... 16:
			powerdomain = "TPVDD";
			break;

		default:
			BUG();
			break;
		}

		/* On has_gpio_ena parts the tristate bit reads inverted. */
		tristated = reg & WM831X_GPN_TRI;
		if (wm831x->has_gpio_ena)
			tristated = !tristated;

		seq_printf(s, " %s %s %s %s%s\n"
			   " %s%s (0x%4x)\n",
			   reg & WM831X_GPN_DIR ? "in" : "out",
			   wm831x_gpio_get(chip, i) ? "high" : "low",
			   pull,
			   powerdomain,
			   reg & WM831X_GPN_POL ? "" : " inverted",
			   reg & WM831X_GPN_OD ? "open-drain" : "CMOS",
			   tristated ? " tristated" : "",
			   reg);
	}
}
#else
#define wm831x_gpio_dbg_show NULL
#endif

/* Template copied into each device instance; ngpio/base filled at probe. */
static struct gpio_chip template_chip = {
	.label			= "wm831x",
	.owner			= THIS_MODULE,
	.direction_input	= wm831x_gpio_direction_in,
	.get			= wm831x_gpio_get,
	.direction_output	= wm831x_gpio_direction_out,
	.set			= wm831x_gpio_set,
	.to_irq			= wm831x_gpio_to_irq,
	.set_debounce		= wm831x_gpio_set_debounce,
	.dbg_show		= wm831x_gpio_dbg_show,
	/* Accesses go over I2C/SPI via the MFD regmap, so they may sleep. */
	.can_sleep		= 1,
};

/*
 * Bind to the MFD child device: allocate state, clone the template
 * chip, honour a platform-data gpio_base if one was supplied (else let
 * gpiolib pick with -1), and register with gpiolib.
 */
static int __devinit wm831x_gpio_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_pdata *pdata = wm831x->dev->platform_data;
	struct wm831x_gpio *wm831x_gpio;
	int ret;

	wm831x_gpio = kzalloc(sizeof(*wm831x_gpio), GFP_KERNEL);
	if (wm831x_gpio == NULL)
		return -ENOMEM;

	wm831x_gpio->wm831x = wm831x;
	wm831x_gpio->gpio_chip = template_chip;
	wm831x_gpio->gpio_chip.ngpio = wm831x->num_gpio;
	wm831x_gpio->gpio_chip.dev = &pdev->dev;
	if (pdata && pdata->gpio_base)
		wm831x_gpio->gpio_chip.base = pdata->gpio_base;
	else
		wm831x_gpio->gpio_chip.base = -1;

	ret = gpiochip_add(&wm831x_gpio->gpio_chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
			ret);
		goto err;
	}

	platform_set_drvdata(pdev, wm831x_gpio);

	return ret;

err:
	kfree(wm831x_gpio);
	return ret;
}

/*
 * Unregister the chip; only free our state if gpiolib agreed to the
 * removal (it can fail while GPIOs are still requested).
 */
static int __devexit wm831x_gpio_remove(struct platform_device *pdev)
{
	struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev);
	int ret;

	ret = gpiochip_remove(&wm831x_gpio->gpio_chip);
	if (ret == 0)
		kfree(wm831x_gpio);

	return ret;
}

static struct platform_driver wm831x_gpio_driver = {
	.driver.name	= "wm831x-gpio",
	.driver.owner	= THIS_MODULE,
	.probe		= wm831x_gpio_probe,
	.remove		= __devexit_p(wm831x_gpio_remove),
};

static int __init wm831x_gpio_init(void)
{
	return platform_driver_register(&wm831x_gpio_driver);
}
/* subsys_initcall so GPIOs exist before device drivers probe. */
subsys_initcall(wm831x_gpio_init);

static void __exit wm831x_gpio_exit(void)
{
	platform_driver_unregister(&wm831x_gpio_driver);
}
module_exit(wm831x_gpio_exit);

MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("GPIO interface for WM831x PMICs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-gpio");
gpl-2.0
Hani-K/Simplicity_Kernel_Exynos5433_LL
drivers/xen/platform-pci.c
2398
4677
/******************************************************************************
 * platform-pci.c
 *
 * Xen platform PCI device driver
 * Copyright (c) 2005, Intel Corporation.
 * Copyright (c) 2007, XenSource Inc.
 * Copyright (c) 2010, Citrix
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <xen/platform_pci.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>

#define DRV_NAME "xen-platform-pci"

MODULE_AUTHOR("ssmith@xensource.com and stefano.stabellini@eu.citrix.com");
MODULE_DESCRIPTION("Xen platform PCI device");
MODULE_LICENSE("GPL");

/* Base/length of the device's MMIO BAR and a bump-allocator cursor into it. */
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
/* Saved so the event-channel callback can be re-registered on resume. */
static uint64_t callback_via;

/*
 * Carve @len bytes out of the platform device's MMIO region.  Simple
 * bump allocator; there is no free.  BUG()s if the region is exhausted
 * since callers (e.g. grant-table setup) cannot proceed without it.
 */
unsigned long alloc_xen_mmio(unsigned long len)
{
	unsigned long addr;

	addr = platform_mmio + platform_mmio_alloc;
	platform_mmio_alloc += len;
	BUG_ON(platform_mmio_alloc > platform_mmiolen);

	return addr;
}

/*
 * Build the HVM_PARAM_CALLBACK_IRQ value telling Xen where to deliver
 * event-channel notifications: either a raw ISA IRQ (< 16), or an
 * encoded PCI INTx (type 1 in bits 56-63, then domain/bus/devfn/pin).
 */
static uint64_t get_callback_via(struct pci_dev *pdev)
{
	u8 pin;
	int irq;

	irq = pdev->irq;
	if (irq < 16)
		return irq; /* ISA IRQ */

	pin = pdev->pin;

	/* We don't know the GSI. Specify the PCI INTx line instead. */
	return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */
		((uint64_t)pci_domain_nr(pdev->bus) << 32) |
		((uint64_t)pdev->bus->number << 16) |
		((uint64_t)(pdev->devfn & 0xff) << 8) |
		((uint64_t)(pin - 1) & 3);
}

/* Legacy-interrupt handler: funnel the line IRQ into the evtchn upcall. */
static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}

/* Claim the platform device's line interrupt for event delivery. */
static int xen_allocate_irq(struct pci_dev *pdev)
{
	return request_irq(pdev->irq, do_hvm_evtchn_intr,
			IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
			"xen-platform-pci", pdev);
}

/*
 * Re-tell Xen about the callback IRQ after suspend/resume; hypervisor
 * state is lost across save/restore.  Not needed with vector callbacks.
 */
static int platform_pci_resume(struct pci_dev *pdev)
{
	int err;
	if (xen_have_vector_callback)
		return 0;
	err = xen_set_callback_via(callback_via);
	if (err) {
		dev_err(&pdev->dev, "platform_pci_resume failure!\n");
		return err;
	}
	return 0;
}

/*
 * Probe: enable the device, claim its I/O and MMIO BARs, set up the
 * event-channel callback (unless vector callbacks are available),
 * reserve MMIO space for the grant-table frames, init grant tables and
 * kick off xenbus.  Cleanup uses the usual goto-unwind ladder.
 */
static int platform_pci_init(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	int i, ret;
	long ioaddr;
	long mmio_addr, mmio_len;
	unsigned int max_nr_gframes;

	if (!xen_domain())
		return -ENODEV;

	i = pci_enable_device(pdev);
	if (i)
		return i;

	ioaddr = pci_resource_start(pdev, 0);

	mmio_addr = pci_resource_start(pdev, 1);
	mmio_len = pci_resource_len(pdev, 1);

	if (mmio_addr == 0 || ioaddr == 0) {
		dev_err(&pdev->dev, "no resources found\n");
		ret = -ENOENT;
		goto pci_out;
	}

	ret = pci_request_region(pdev, 1, DRV_NAME);
	if (ret < 0)
		goto pci_out;

	ret = pci_request_region(pdev, 0, DRV_NAME);
	if (ret < 0)
		goto mem_out;

	platform_mmio = mmio_addr;
	platform_mmiolen = mmio_len;

	if (!xen_have_vector_callback) {
		ret = xen_allocate_irq(pdev);
		if (ret) {
			dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
			goto out;
		}
		callback_via = get_callback_via(pdev);
		ret = xen_set_callback_via(callback_via);
		if (ret) {
			dev_warn(&pdev->dev, "Unable to set the evtchn callback "
					 "err=%d\n", ret);
			goto out;
		}
	}

	max_nr_gframes = gnttab_max_grant_frames();
	xen_hvm_resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
	ret = gnttab_init();
	if (ret)
		goto out;
	xenbus_probe(NULL);
	return 0;

out:
	pci_release_region(pdev, 0);
mem_out:
	pci_release_region(pdev, 1);
pci_out:
	pci_disable_device(pdev);
	return ret;
}

static struct pci_device_id platform_pci_tbl[] = {
	{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0,}
};
MODULE_DEVICE_TABLE(pci, platform_pci_tbl);

static struct pci_driver platform_driver = {
	.name =           DRV_NAME,
	.probe =          platform_pci_init,
	.id_table =       platform_pci_tbl,
#ifdef CONFIG_PM
	.resume_early =   platform_pci_resume,
#endif
};

static int __init platform_pci_module_init(void)
{
	return pci_register_driver(&platform_driver);
}

module_init(platform_pci_module_init);
gpl-2.0
svtronics/kernel-pandaboard-ES-RevB3
arch/arm/mach-sa1100/generic.c
4702
10504
/*
 * linux/arch/arm/mach-sa1100/generic.c
 *
 * Author: Nicolas Pitre
 *
 * Code common to all SA11x0 machines.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

#include <video/sa1100fb.h>

#include <asm/div64.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
#include <asm/irq.h>
#include <asm/system_misc.h>

#include <mach/hardware.h>
#include <mach/irqs.h>

#include "generic.h"

unsigned int reset_status;
EXPORT_SYMBOL(reset_status);

#define NR_FREQS	16

/*
 * This table is setup for a 3.6864MHz Crystal.
 */
static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
	 590,	/*  59.0 MHz */
	 737,	/*  73.7 MHz */
	 885,	/*  88.5 MHz */
	1032,	/* 103.2 MHz */
	1180,	/* 118.0 MHz */
	1327,	/* 132.7 MHz */
	1475,	/* 147.5 MHz */
	1622,	/* 162.2 MHz */
	1769,	/* 176.9 MHz */
	1917,	/* 191.7 MHz */
	2064,	/* 206.4 MHz */
	2212,	/* 221.2 MHz */
	2359,	/* 235.9 MHz */
	2507,	/* 250.7 MHz */
	2654,	/* 265.4 MHz */
	2802	/* 280.2 MHz */
};

/*
 * Convert a frequency in kHz to a PPCR index, rounding up(!) to the
 * first table entry that can deliver at least the requested rate.
 * Note: for a request above the table maximum this returns NR_FREQS,
 * one past the last valid index — callers are expected to have passed
 * a policy-clamped value.
 */
unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
{
	int i;

	khz /= 100;

	for (i = 0; i < NR_FREQS; i++)
		if (cclk_frequency_100khz[i] >= khz)
			break;

	return i;
}

/* Inverse mapping: PPCR index to frequency in kHz (0 if out of range). */
unsigned int sa11x0_ppcr_to_freq(unsigned int idx)
{
	unsigned int freq = 0;
	if (idx < NR_FREQS)
		freq = cclk_frequency_100khz[idx] * 100;
	return freq;
}

/*
 * make sure that only the "userspace" governor is run -- anything else
 * wouldn't make sense on this platform, anyway.
 */
int sa11x0_verify_speed(struct cpufreq_policy *policy)
{
	unsigned int tmp;
	if (policy->cpu)
		return -EINVAL;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	/* make sure that at least one frequency is within the policy */
	tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100;
	if (tmp > policy->max)
		policy->max = tmp;

	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);

	return 0;
}

/* Current core clock in kHz, read back from the PPCR register field. */
unsigned int sa11x0_getspeed(unsigned int cpu)
{
	if (cpu)
		return 0;
	return cclk_frequency_100khz[PPCR & 0xf] * 100;
}

/*
 * Default power-off for SA1100
 */
static void sa1100_power_off(void)
{
	/* Let pending I/O (console output etc.) settle before sleeping. */
	mdelay(100);
	local_irq_disable();
	/* disable internal oscillator, float CS lines */
	PCFR = (PCFR_OPDE | PCFR_FP | PCFR_FS);
	/* enable wake-up on GPIO0 (Assabet...) */
	PWER = GFER = GRER = 1;
	/*
	 * set scratchpad to zero, just in case it is used as a
	 * restart address by the bootloader.
	 */
	PSPR = 0;
	/* enter sleep mode */
	PMCR = PMCR_SF;
}

/*
 * Restart the machine: 's' means a "soft" restart by jumping to the
 * reset vector; anything else triggers the on-chip software reset.
 */
void sa11x0_restart(char mode, const char *cmd)
{
	if (mode == 's') {
		/* Jump into ROM at address 0 */
		soft_restart(0);
	} else {
		/* Use on-chip reset capability */
		RSRR = RSRR_SWR;
	}
}

/*
 * Attach platform data to a device and register it, logging (but not
 * propagating) any registration failure.
 */
static void sa11x0_register_device(struct platform_device *dev, void *data)
{
	int err;
	dev->dev.platform_data = data;
	err = platform_device_register(dev);
	if (err)
		printk(KERN_ERR "Unable to register device %s: %d\n",
			dev->name, err);
}


static struct resource sa11x0udc_resources[] = {
	[0] = DEFINE_RES_MEM(__PREG(Ser0UDCCR), SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_Ser0UDC),
};

static u64 sa11x0udc_dma_mask = 0xffffffffUL;

static struct platform_device sa11x0udc_device = {
	.name		= "sa11x0-udc",
	.id		= -1,
	.dev		= {
		.dma_mask = &sa11x0udc_dma_mask,
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0udc_resources),
	.resource	= sa11x0udc_resources,
};

static struct resource sa11x0uart1_resources[] = {
	[0] = DEFINE_RES_MEM(__PREG(Ser1UTCR0), SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_Ser1UART),
};

static struct platform_device sa11x0uart1_device = {
	.name		= "sa11x0-uart",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(sa11x0uart1_resources),
	.resource	= sa11x0uart1_resources,
};

static struct resource sa11x0uart3_resources[] = {
	[0] = DEFINE_RES_MEM(__PREG(Ser3UTCR0), SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_Ser3UART),
};

static struct platform_device sa11x0uart3_device = {
	.name		= "sa11x0-uart",
	.id		= 3,
	.num_resources	= ARRAY_SIZE(sa11x0uart3_resources),
	.resource	= sa11x0uart3_resources,
};

static struct resource sa11x0mcp_resources[] = {
	[0] = DEFINE_RES_MEM(__PREG(Ser4MCCR0), SZ_64K),
	[1] = DEFINE_RES_MEM(__PREG(Ser4MCCR1), 4),
	[2] = DEFINE_RES_IRQ(IRQ_Ser4MCP),
};

static u64 sa11x0mcp_dma_mask = 0xffffffffUL;

static struct platform_device sa11x0mcp_device = {
	.name		= "sa11x0-mcp",
	.id		= -1,
	.dev = {
		.dma_mask = &sa11x0mcp_dma_mask,
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0mcp_resources),
	.resource	= sa11x0mcp_resources,
};

/* Route the PPC pins to the MCP block (board code calls this). */
void __init sa11x0_ppc_configure_mcp(void)
{
	/* Setup the PPC unit for the MCP */
	PPDR &= ~PPC_RXD4;
	PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
	PSDR |= PPC_RXD4;
	PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
	PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
}

void sa11x0_register_mcp(struct mcp_plat_data *data)
{
	sa11x0_register_device(&sa11x0mcp_device, data);
}

static struct resource sa11x0ssp_resources[] = {
	[0] = DEFINE_RES_MEM(0x80070000, SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_Ser4SSP),
};

static u64 sa11x0ssp_dma_mask = 0xffffffffUL;

static struct platform_device sa11x0ssp_device = {
	.name		= "sa11x0-ssp",
	.id		= -1,
	.dev = {
		.dma_mask = &sa11x0ssp_dma_mask,
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0ssp_resources),
	.resource	= sa11x0ssp_resources,
};

static struct resource sa11x0fb_resources[] = {
	[0] = DEFINE_RES_MEM(0xb0100000, SZ_64K),
	[1] = DEFINE_RES_IRQ(IRQ_LCD),
};

static struct platform_device sa11x0fb_device = {
	.name		= "sa11x0-fb",
	.id		= -1,
	.dev = {
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0fb_resources),
	.resource	= sa11x0fb_resources,
};

void sa11x0_register_lcd(struct sa1100fb_mach_info *inf)
{
	sa11x0_register_device(&sa11x0fb_device, inf);
}

static struct platform_device sa11x0pcmcia_device = {
	.name		= "sa11x0-pcmcia",
	.id		= -1,
};

static struct platform_device sa11x0mtd_device = {
	.name		= "sa1100-mtd",
	.id		= -1,
};

/*
 * Register the flash device with board-supplied resources; forces the
 * map driver name to "sa1100".
 */
void sa11x0_register_mtd(struct flash_platform_data *flash,
			 struct resource *res, int nr)
{
	flash->name = "sa1100";
	sa11x0mtd_device.resource = res;
	sa11x0mtd_device.num_resources = nr;
	sa11x0_register_device(&sa11x0mtd_device, flash);
}

static struct resource sa11x0ir_resources[] = {
	DEFINE_RES_MEM(__PREG(Ser2UTCR0), 0x24),
	DEFINE_RES_MEM(__PREG(Ser2HSCR0), 0x1c),
	DEFINE_RES_MEM(__PREG(Ser2HSCR2), 0x04),
	DEFINE_RES_IRQ(IRQ_Ser2ICP),
};

static struct platform_device sa11x0ir_device = {
	.name		= "sa11x0-ir",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(sa11x0ir_resources),
	.resource	= sa11x0ir_resources,
};

void sa11x0_register_irda(struct irda_platform_data *irda)
{
	sa11x0_register_device(&sa11x0ir_device, irda);
}

static struct resource sa1100_rtc_resources[] = {
	DEFINE_RES_MEM(0x90010000, 0x40),
	DEFINE_RES_IRQ_NAMED(IRQ_RTC1Hz, "rtc 1Hz"),
	DEFINE_RES_IRQ_NAMED(IRQ_RTCAlrm, "rtc alarm"),
};

static struct platform_device sa11x0rtc_device = {
	.name		= "sa1100-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(sa1100_rtc_resources),
	.resource	= sa1100_rtc_resources,
};

static struct resource sa11x0dma_resources[] = {
	DEFINE_RES_MEM(DMA_PHYS, DMA_SIZE),
	DEFINE_RES_IRQ(IRQ_DMA0),
	DEFINE_RES_IRQ(IRQ_DMA1),
	DEFINE_RES_IRQ(IRQ_DMA2),
	DEFINE_RES_IRQ(IRQ_DMA3),
	DEFINE_RES_IRQ(IRQ_DMA4),
	DEFINE_RES_IRQ(IRQ_DMA5),
};

static u64 sa11x0dma_dma_mask = DMA_BIT_MASK(32);

static struct platform_device sa11x0dma_device = {
	.name		= "sa11x0-dma",
	.id		= -1,
	.dev = {
		.dma_mask = &sa11x0dma_dma_mask,
		.coherent_dma_mask = 0xffffffff,
	},
	.num_resources	= ARRAY_SIZE(sa11x0dma_resources),
	.resource	= sa11x0dma_resources,
};

/*
 * Devices common to every SA11x0 board; the remainder (fb, mcp, mtd,
 * irda) need board-specific platform data and are registered via the
 * sa11x0_register_* helpers above.
 */
static struct platform_device *sa11x0_devices[] __initdata = {
	&sa11x0udc_device,
	&sa11x0uart1_device,
	&sa11x0uart3_device,
	&sa11x0ssp_device,
	&sa11x0pcmcia_device,
	&sa11x0rtc_device,
	&sa11x0dma_device,
};

static int __init sa1100_init(void)
{
	pm_power_off = sa1100_power_off;
	return platform_add_devices(sa11x0_devices, ARRAY_SIZE(sa11x0_devices));
}

arch_initcall(sa1100_init);


/*
 * Common I/O mapping:
 *
 * Typically, static virtual address mappings are as follow:
 *
 * 0xf0000000-0xf3ffffff:	miscellaneous stuff (CPLDs, etc.)
 * 0xf4000000-0xf4ffffff:	SA-1111
 * 0xf5000000-0xf5ffffff:	reserved (used by cache flushing area)
 * 0xf6000000-0xfffeffff:	reserved (internal SA1100 IO defined above)
 * 0xffff0000-0xffff0fff:	SA1100 exception vectors
 * 0xffff2000-0xffff2fff:	Minicache copy_user_page area
 *
 * Below 0xe8000000 is reserved for vm allocation.
 *
 * The machine specific code must provide the extra mapping beside the
 * default mapping provided here.
 */
static struct map_desc standard_io_desc[] __initdata = {
	{	/* PCM */
		.virtual	=  0xf8000000,
		.pfn		= __phys_to_pfn(0x80000000),
		.length		= 0x00100000,
		.type		= MT_DEVICE
	}, {	/* SCM */
		.virtual	=  0xfa000000,
		.pfn		= __phys_to_pfn(0x90000000),
		.length		= 0x00100000,
		.type		= MT_DEVICE
	}, {	/* MER */
		.virtual	=  0xfc000000,
		.pfn		= __phys_to_pfn(0xa0000000),
		.length		= 0x00100000,
		.type		= MT_DEVICE
	}, {	/* LCD + DMA */
		.virtual	=  0xfe000000,
		.pfn		= __phys_to_pfn(0xb0000000),
		.length		= 0x00200000,
		.type		= MT_DEVICE
	},
};

void __init sa1100_map_io(void)
{
	iotable_init(standard_io_desc, ARRAY_SIZE(standard_io_desc));
}

/*
 * Disable the memory bus request/grant signals on the SA1110 to
 * ensure that we don't receive spurious memory requests.  We set
 * the MBGNT signal false to ensure the SA1111 doesn't own the
 * SDRAM bus.
 */
void sa1110_mb_disable(void)
{
	unsigned long flags;

	local_irq_save(flags);

	PGSR &= ~GPIO_MBGNT;
	GPCR = GPIO_MBGNT;
	GPDR = (GPDR & ~GPIO_MBREQ) | GPIO_MBGNT;

	GAFR &= ~(GPIO_MBGNT | GPIO_MBREQ);

	local_irq_restore(flags);
}

/*
 * If the system is going to use the SA-1111 DMA engines, set up
 * the memory bus request/grant pins.
 */
void sa1110_mb_enable(void)
{
	unsigned long flags;

	local_irq_save(flags);

	PGSR &= ~GPIO_MBGNT;
	GPCR = GPIO_MBGNT;
	GPDR = (GPDR & ~GPIO_MBREQ) | GPIO_MBGNT;

	GAFR |= (GPIO_MBGNT | GPIO_MBREQ);
	TUCR |= TUCR_MR;

	local_irq_restore(flags);
}
gpl-2.0